repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
alexandreday/fast_density_clustering | build/lib/fdc/plotting.py | 2 | 16580 | '''
Created on Jan 16, 2017
@author: Alexandre Day
'''
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patheffects as PathEffects
from .mycolors import COLOR_PALETTE
from .fdc import FDC
import math
def set_nice_font(size=18, usetex=False):
    """Switch matplotlib to a serif font of the given size, optionally rendering text with LaTeX."""
    plt.rc('font', family='serif', size=size)
    if usetex:
        plt.rc('text', usetex=True)
def density_map(X, z,
                xlabel=None, ylabel=None, zlabel=None, label=None,
                centers=None,
                psize=20,
                out_file=None, title=None, show=True, cmap='coolwarm',
                remove_tick=False,
                use_perc=False,
                rasterized=True,
                fontsize=15,
                vmax=None,
                vmin=None
                ):
    """Plots a 2D density map given x,y coordinates and an intensity z for
    every data point

    Parameters
    ----------
    X : array-like, shape=[n_samples,2]
        Input points.
    z : array-like, shape=[n_samples]
        Density at every point

    Returns
    -------
    None
    """
    x, y = X[:, 0], X[:, 1]

    if use_perc:
        # Robust colour scale: draw the central 90% of the density values
        # with the colormap and the bottom/top 5% in fixed colours so
        # extreme outliers do not wash out the scale.
        n_sample = len(x)
        outlier_window = int(0.05 * n_sample)
        argz = np.argsort(z)
        bot_outliers = argz[:outlier_window]
        top_outliers = argz[-outlier_window:]
        typical = argz[outlier_window:-outlier_window]
        # plot typical points
        plt.scatter(x[typical], y[typical], c=z[typical], cmap=cmap, s=psize,
                    alpha=1.0, rasterized=rasterized)
        cb = plt.colorbar()
        # plot bottom outliers (black)
        plt.scatter(x[bot_outliers], y[bot_outliers], c='black', s=psize,
                    alpha=1.0, rasterized=rasterized)
        # plot top outliers (green)
        plt.scatter(x[top_outliers], y[top_outliers], c='#36DA36', s=psize,
                    alpha=1.0, rasterized=rasterized)
    else:
        if label is not None:
            plt.scatter(x, y, c=z, cmap=cmap, s=psize, alpha=1.0,
                        rasterized=rasterized, label=label,
                        vmax=vmax, vmin=vmin)
        else:
            plt.scatter(x, y, c=z, cmap=cmap, s=psize, alpha=1.0,
                        rasterized=rasterized, vmax=vmax, vmin=vmin)
        cb = plt.colorbar()

    if remove_tick:
        # Boolean flags: the string 'off' form is deprecated/removed in
        # modern matplotlib.
        plt.tick_params(labelbottom=False, labelleft=False)
    if xlabel is not None:
        plt.xlabel(xlabel, fontsize=fontsize)
    if ylabel is not None:
        plt.ylabel(ylabel, fontsize=fontsize)
    if zlabel is not None:
        cb.set_label(label=zlabel, labelpad=10)
    if title is not None:
        plt.title(title, fontsize=fontsize)
    if label is not None:
        plt.legend(loc='best')
    if centers is not None:
        # Mark cluster centers with a star on top of the density.
        plt.scatter(centers[:, 0], centers[:, 1], c='lightgreen', marker='*',
                    s=200, edgecolor='black', linewidths=0.5)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
def scatter_w_label(x, y, z, psize=20, label=None):
    """Scatter (x, y) coloured by the discrete values in z, one palette colour per value."""
    mycol = COLOR_PALETTE()
    zflat = z.flatten()
    plt.subplots(figsize=(8, 6))
    for i, zval in enumerate(np.sort(np.unique(zflat))):
        mask = (zflat == zval)
        extra = {} if label is None else {'label': label[i]}
        plt.scatter(x[mask], y[mask], s=psize, c=mycol[i], rasterized=True,
                    **extra)
    if label is not None:
        plt.legend(loc='best', fontsize=12)
    plt.tight_layout()
    plt.show()
def plot_true_label(X, palette, y=None, fontsize=15, psize=20):
    """Scatter X coloured by the ground-truth labels y (one colour if y is None)."""
    plt.title('True labels', fontsize=fontsize)
    print("--> Plotting summary: True clustered labels, inferred labels and density map ")
    if y is None:
        plt.scatter(X[:, 0], X[:, 1], c=palette[0], rasterized=True)
    else:
        for i, yu in enumerate(np.unique(y)):
            mask = (y == yu)
            plt.scatter(X[mask, 0], X[mask, 1], s=psize, c=palette[i],
                        rasterized=True)
def plot_inferred_label(ax, X, idx_centers, cluster_label, palette, psize=20,
                        eta=None, eta_show=True, fontsize=15):
    """Scatter X coloured by inferred cluster label and annotate every
    cluster center with its index (white-outlined for readability).

    Parameters
    ----------
    ax : matplotlib axis to annotate.
    X : array-like, shape=[n_samples, 2], input points.
    idx_centers : indices into X of the cluster centers.
    cluster_label : array of integer cluster assignments per sample.
    palette : indexable colour palette.
    eta : optional noise parameter displayed in the lower-left corner
        when eta_show is True.
    """
    n_center = len(idx_centers)
    for i in range(n_center):
        pos = (cluster_label == i)
        plt.scatter(X[pos, 0], X[pos, 1], c=palette[i], s=psize,
                    rasterized=True)
    centers = X[idx_centers]
    for i, xy in enumerate(centers):
        # Position of each label.
        txt = ax.annotate(str(i), xy,
                          xytext=(0, 0), textcoords='offset points',
                          fontsize=20, horizontalalignment='center',
                          verticalalignment='center')
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    dx = xmax - xmin
    dy = ymax - ymin
    if eta is not None and eta_show:  # displaying eta parameter
        # Raw string: avoids the invalid '\e' escape warning while keeping
        # the LaTeX \eta intact.
        txt = ax.annotate(r"$\eta=%.2f$" % eta,
                          [xmin + 0.15 * dx, ymin + 0.05 * dy],
                          xytext=(0, 0), textcoords='offset points',
                          fontsize=20, horizontalalignment='center',
                          verticalalignment='center')
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
    plt.title('Inferred labels', fontsize=fontsize)
    plt.tight_layout()
def summary(idx_centers, cluster_label, rho, X, eta=None, eta_show=True,
            y=None, psize=20, savefile=None, show=False,
            plot_to_show=None
            ):
    """Summary plots : original labels (if available), inferred labels and density map used for clustering"""
    fontsize = 15
    palette = COLOR_PALETTE()
    plt.figure(1, figsize=(22, 10))

    # Left panel: ground truth (if any).
    plt.subplot(131)
    plot_true_label(X, palette, y=y, fontsize=fontsize, psize=psize)

    # Middle panel: inferred clusters with annotated centers.
    ax = plt.subplot(132)
    plot_inferred_label(ax, X, idx_centers, cluster_label, palette,
                        psize=psize, eta=eta, eta_show=eta_show,
                        fontsize=fontsize)

    # Right panel: density used for the clustering.
    plt.subplot(133)
    density_map(X, rho, centers=X[idx_centers], title='Density map',
                psize=psize, show=False)

    plt.tight_layout()
    if savefile:
        plt.savefig(savefile, dpi=300)
    if show:
        plt.show()
    plt.clf()
def summary_model(model, eta=None, ytrue=None, show=True, savefile=None, eta_show=True):
    """Summary figure passing in only an FDC object (model), noise can be specified via the eta parameter"""
    if eta is None:
        eta_ = model.eta
        idx_centers = model.idx_centers
        cluster_label = model.cluster_label
    else:
        # Snap to the nearest recorded noise level in the hierarchy.
        pos = np.argmin(np.abs(np.array(model.noise_range) - eta))
        eta_ = model.noise_range[pos]
        idx_centers = model.hierarchy[pos]['idx_centers']
        cluster_label = model.hierarchy[pos]['cluster_labels']
    summary(idx_centers, cluster_label, model.rho, model.X, y=ytrue,
            eta=eta_, show=show, savefile=savefile, eta_show=eta_show)
def inferred_label(model, eta=None, show=True, savefile=None, eta_show=True,
                   fontsize=15, psize=20):
    """Single-panel plot of the labels inferred by an FDC model, optionally
    at the hierarchy level closest to the requested eta."""
    if eta is None:
        eta_ = model.noise_range[-1]
        idx_centers = model.idx_centers
        cluster_label = model.cluster_label
    else:
        # Snap to the nearest recorded noise level in the hierarchy.
        pos = np.argmin(np.abs(np.array(model.noise_range) - eta))
        eta_ = model.noise_range[pos]
        idx_centers = model.hierarchy[pos]['idx_centers']
        cluster_label = model.hierarchy[pos]['cluster_labels']
    palette = COLOR_PALETTE()
    plt.figure(1, figsize=(10, 10))
    ax = plt.subplot(111)
    plot_inferred_label(ax, model.X, idx_centers, cluster_label, palette,
                        psize=psize, eta=eta_, eta_show=eta_show,
                        fontsize=fontsize)
    if savefile is not None:
        plt.savefig(savefile)
    if show is True:
        plt.show()
    plt.clf()
def cluster_w_label(X, y, Xcluster=None, show=True, savefile=None, fontsize=15,
                    psize=20, title=None, w_label=True, figsize=None,
                    dpi=200, alpha=0.7, edgecolors=None, cp_style=1,
                    w_legend=False, outlier=True):
    """Scatter X coloured by cluster label y; optionally annotate each
    cluster at the member point nearest its mean (or the supplied center).

    When *outlier* is True, labels -1/-2/-3 are drawn in fixed colours with
    a distinct marker instead of palette colours.  Returns the axis.
    NOTE(review): the figure is cleared and closed before returning, even
    with show=False — confirm downstream users expect that.
    """
    if figsize is not None:
        plt.figure(figsize=figsize)
    palette = COLOR_PALETTE(style=cp_style)
    ax = plt.subplot(111)
    all_idx = np.arange(len(X))

    labels = np.unique(y)
    if outlier is True:
        labels = labels[labels > -1]

    idx_centers = []
    for i, yu in enumerate(labels):
        mask = (y == yu)
        Xsub = X[mask]
        plt.scatter(Xsub[:, 0], Xsub[:, 1], c=palette[i], s=psize,
                    rasterized=True, alpha=alpha, edgecolors=edgecolors,
                    label=yu)
        # Annotation anchor: the member point closest to the cluster mean
        # (or to the externally supplied center).
        Xmean = Xcluster[i] if Xcluster is not None else np.mean(Xsub, axis=0)
        idx_centers.append(
            all_idx[mask][np.argmin(np.linalg.norm(Xsub - Xmean, axis=1))])

    if outlier is True:
        color_out = {-3: '#ff0050', -2: '#9eff49', -1: '#89f9ff'}
        for yi in (-3, -2, -1):
            mask = (y == yi)
            if np.count_nonzero(mask) > 0:
                Xsub = X[mask]
                plt.scatter(Xsub[:, 0], Xsub[:, 1], c=color_out[yi], s=psize,
                            rasterized=True, alpha=alpha, marker="2",
                            edgecolors=edgecolors, label=yi)

    if w_label is True:
        for xy, yu in zip(X[idx_centers], labels):
            # Cluster label drawn with a white outline for readability.
            txt = ax.annotate(str(yu), xy,
                              xytext=(0, 0), textcoords='offset points',
                              fontsize=fontsize, horizontalalignment='center',
                              verticalalignment='center')
            txt.set_path_effects([
                PathEffects.Stroke(linewidth=5, foreground="w"),
                PathEffects.Normal()])

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, fontsize=fontsize)
    if w_legend is True:
        plt.legend(loc='best')
    plt.tight_layout()
    if savefile is not None:
        if dpi is None:
            plt.savefig(savefile)
        else:
            plt.savefig(savefile, dpi=dpi)
    if show is True:
        plt.show()
    plt.clf()
    plt.close()
    return ax
def summary_v2(idx_centers, cluster_label, rho, X, n_true_center=1, y=None,
               psize=20, savefile=None, show=False):
    """Summary plots w/o density map: a single panel of the inferred
    clusters with each center annotated by its index."""
    palette = COLOR_PALETTE()
    ax = plt.subplot(111)
    n_center = idx_centers.shape[0]
    for i in range(n_center):
        mask = (cluster_label == i)
        plt.scatter(X[mask, 0], X[mask, 1], c=palette[i], s=psize,
                    rasterized=True)
    for i, xy in enumerate(X[idx_centers]):
        # Center index drawn with a white outline for readability.
        txt = ax.annotate(str(i), xy,
                          xytext=(0, 0), textcoords='offset points',
                          fontsize=20, horizontalalignment='center',
                          verticalalignment='center')
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
    if savefile:
        plt.savefig(savefile)
    if show is True:
        plt.show()
    plt.clf()
def dendrogram(model, show=True, savefile=None):
    """Plot the clustering hierarchy of an FDC model as a scipy dendrogram,
    with the noise level eta on the y axis."""
    from scipy.cluster.hierarchy import dendrogram as scipydendro
    from .hierarchy import compute_linkage_matrix
    fontsize = 15
    Z = compute_linkage_matrix(model)
    scipydendro(Z)
    plt.ylim(0, 1.2 * model.max_noise)
    # Raw strings keep the LaTeX escapes (\#, \eta) away from Python's
    # string-escape processing (invalid-escape warnings otherwise).
    plt.xlabel(r'cluster $\#$', fontsize=fontsize)
    plt.ylabel(r'$\eta$', fontsize=fontsize)
    plt.title('Clustering hierarchy', fontsize=fontsize)
    plt.tight_layout()
    # BUG FIX: save before showing — plt.show() can leave an empty canvas,
    # so saving afterwards wrote a blank file.
    if savefile is not None:
        plt.savefig(savefile)
    if show is True:
        plt.show()
    plt.clf()
def viSNE(X_2D, X_original, markers, show=True, savefig=None, col_index=None,
          col_wrap=4, downsample=None):
    """Plots intensity on top of data.

    Parameters
    ------------
    X_2D : coordinates of the data points (2d points)
    X_original : original marker intensity
    markers : list of str, list of names of the markers (for showing as titles)
    savefig : str, if u want to save figure, should be the name of the output file
    downsample : int, number of data points in a random sample of the original data
    show : bool, if u want to see the plots
    """
    # NOTE: the import used to precede this docstring, which demoted it to a
    # throw-away string statement; imported lazily here so pandas is only
    # required when this helper is actually used.
    import pandas as pd
    X = X_2D
    if col_index is not None:
        z_df = pd.DataFrame(X_original[:, col_index],
                            columns=[markers[i] for i in col_index])
    else:
        z_df = pd.DataFrame(X_original, columns=markers)
    facegrid(X[:, 0], X[:, 1], z_df, show=show, savefig=savefig,
             downsample=downsample, col_wrap=col_wrap)
def facegrid(x, y, z_df, col_wrap=4, downsample=None, show=True, savefig=None):
    """Grid of scatter plots, one panel per column of z_df, each coloured by
    that column's values.

    Parameters
    ----------
    x, y : 1d coordinates of the points.
    z_df : pandas.DataFrame of per-point intensities (one panel per column).
    downsample : int or None, random subsample size.
    savefig : str or None, output filename.
    """
    n_sample = x.shape[0]
    if downsample is not None:
        random_sub = np.random.choice(np.arange(0, n_sample, dtype=int),
                                      downsample, replace=False)
        xnew = x[random_sub]
        ynew = y[random_sub]
        znew = z_df.iloc[random_sub]
    else:
        xnew = x
        ynew = y
        znew = z_df
    n_plot = z_df.shape[1]
    assert len(x) == len(y) and len(x) == len(z_df)
    n_row = math.ceil(n_plot / col_wrap)
    # Keep individual panels roughly square whichever dimension dominates.
    xfig = 12
    yfig = 8
    xper_graph = xfig / col_wrap
    yper_graph = yfig / n_row
    if n_row >= col_wrap:
        xper_graph = yper_graph
    else:
        yper_graph = xper_graph
    plt.figure(figsize=(xper_graph * col_wrap, yper_graph * n_row))
    col_names = z_df.columns.values
    for i in range(n_plot):
        ax = plt.subplot(n_row, col_wrap, i + 1)
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
        my_scatter(xnew, ynew, znew.iloc[:, i].to_numpy(), ax)
        ax.set_title(col_names[i])
        ax.set_xticks([])
        ax.set_yticks([])
    plt.tight_layout()
    if show is True:
        plt.show()
    # BUG FIX: savefig holds a filename (str), so the previous
    # `savefig is True` comparison never saved anything.
    if savefig is not None:
        plt.savefig(savefig)
def my_scatter(x, y, z, ax):
    """Scatter (x, y) coloured by z, drawing the bottom/top 5% of z in fixed
    colours so extreme values do not dominate the colour scale."""
    cmap = plt.get_cmap('coolwarm')
    order = np.argsort(z)
    n_sample = len(x)
    lo = round(n_sample * 0.05)
    hi = round(n_sample * 0.95)
    # Central 90% keeps the colormap; tails get fixed colours.
    mid = order[lo:hi]
    bot = order[:lo]
    top = order[hi:]
    ax.scatter(x[mid], y[mid], c=z[mid], cmap=cmap, s=6)
    ax.scatter(x[bot], y[bot], c="purple", s=4)
    ax.scatter(x[top], y[top], c="#00FF00", s=4)
def select_data(X, y, X_original=None, option=None, loop=False, kwargs=None):
    """Interactively select regions of a scatter plot and display the
    underlying data.

    Currently only option='mnist' is implemented: it shows up to 16 of the
    selected 28x28 digit images.  With loop=True the selection round is
    repeated 10 times.
    """
    from .widget import Highlighter
    # Taking selection from the user, will plot an histogram of the underlying data (default)
    # Other options are {mnist, etc. etc.}
    # BUG FIX: n_repeat was only assigned when loop=True, so loop=False
    # raised NameError; run a single round in that case.
    n_repeat = 10 if loop is True else 1
    if option == 'mnist':
        for _ in range(n_repeat):
            if kwargs is not None:
                ax = cluster_w_label(X, y, show=False, **kwargs)
            else:
                ax = cluster_w_label(X, y, show=False)
            highlighter = Highlighter(ax, X[:, 0], X[:, 1])
            selected_regions = highlighter.mask
            plt.close()
            X_sub = X_original[selected_regions]
            n_plot = min([len(X_sub), 16])
            rpos = np.random.choice(np.arange(len(X_sub)), size=n_plot)
            fig, ax = plt.subplots(4, 4, figsize=(8, 8))
            count = 0
            for i in range(4):
                for j in range(4):
                    count += 1
                    if count > n_plot:
                        break
                    ax[i, j].imshow(X_sub[rpos[4 * i + j]].reshape(28, 28),
                                    cmap="Greys")
                    ax[i, j].set_xticks([])
                    ax[i, j].set_yticks([])
            plt.tight_layout()
            plt.show()
            plt.clf()
| bsd-3-clause |
linebp/pandas | pandas/tests/sparse/test_libsparse.py | 14 | 22152 | from pandas import Series
import pytest
import numpy as np
import operator
import pandas.util.testing as tm
from pandas import compat
from pandas.core.sparse.array import IntIndex, BlockIndex, _make_index
import pandas._libs.sparse as splib
TEST_LENGTH = 20

# Canonical sparse-index fixtures: each dict gives the block layout of two
# indices (xloc/xlen, yloc/ylen) and the expected blocks of their
# intersection (intersect_loc/intersect_len).
plain_case = dict(xloc=[0, 7, 15], xlen=[3, 5, 5], yloc=[2, 9, 14],
                  ylen=[2, 3, 5], intersect_loc=[2, 9, 15],
                  intersect_len=[1, 3, 4])
delete_blocks = dict(xloc=[0, 5], xlen=[4, 4], yloc=[1], ylen=[4],
                     intersect_loc=[1], intersect_len=[3])
split_blocks = dict(xloc=[0], xlen=[10], yloc=[0, 5], ylen=[3, 7],
                    intersect_loc=[0, 5], intersect_len=[3, 5])
skip_block = dict(xloc=[10], xlen=[5], yloc=[0, 12], ylen=[5, 3],
                  intersect_loc=[12], intersect_len=[3])
no_intersect = dict(xloc=[0, 10], xlen=[4, 6], yloc=[5, 17], ylen=[4, 2],
                    intersect_loc=[], intersect_len=[])


def check_cases(_check_case):
    """Invoke *_check_case* on every named fixture plus two empty-index
    corner cases, passing (xloc, xlen, yloc, ylen, eloc, elen)."""
    for case in (plain_case, delete_blocks, split_blocks, skip_block,
                 no_intersect):
        _check_case(case['xloc'], case['xlen'], case['yloc'], case['ylen'],
                    case['intersect_loc'], case['intersect_len'])
    # one or both is empty
    _check_case([0], [5], [], [], [], [])
    _check_case([], [], [], [], [], [])
class TestSparseIndexUnion(object):
    """Tests for make_union on BlockIndex and IntIndex representations."""

    def test_index_make_union(self):
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            # The block-index union must match the expected block
            # locations/lengths, and the integer-index union must agree
            # with the block-index union.
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
            bresult = xindex.make_union(yindex)
            assert (isinstance(bresult, BlockIndex))
            tm.assert_numpy_array_equal(bresult.blocs,
                                        np.array(eloc, dtype=np.int32))
            tm.assert_numpy_array_equal(bresult.blengths,
                                        np.array(elen, dtype=np.int32))
            ixindex = xindex.to_int_index()
            iyindex = yindex.to_int_index()
            iresult = ixindex.make_union(iyindex)
            assert (isinstance(iresult, IntIndex))
            tm.assert_numpy_array_equal(iresult.indices,
                                        bresult.to_int_index().indices)

        # Each string block below sketches the x/y block layout and the
        # expected union r (dashes mark occupied positions).
        """
        x: ----
        y: ----
        r: --------
        """
        xloc = [0]
        xlen = [5]
        yloc = [5]
        ylen = [4]
        eloc = [0]
        elen = [9]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ----- -----
        y: ----- --
        """
        xloc = [0, 10]
        xlen = [5, 5]
        yloc = [2, 17]
        ylen = [5, 2]
        eloc = [0, 10, 17]
        elen = [7, 5, 2]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ------
        y: -------
        r: ----------
        """
        xloc = [1]
        xlen = [5]
        yloc = [3]
        ylen = [5]
        eloc = [1]
        elen = [7]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ------ -----
        y: -------
        r: -------------
        """
        xloc = [2, 10]
        xlen = [4, 4]
        yloc = [4]
        ylen = [8]
        eloc = [2]
        elen = [12]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: --- -----
        y: -------
        r: -------------
        """
        xloc = [0, 5]
        xlen = [3, 5]
        yloc = [0]
        ylen = [7]
        eloc = [0]
        elen = [10]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ------ -----
        y: ------- ---
        r: -------------
        """
        xloc = [2, 10]
        xlen = [4, 4]
        yloc = [4, 13]
        ylen = [8, 4]
        eloc = [2]
        elen = [15]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ----------------------
        y: ---- ---- ---
        r: ----------------------
        """
        xloc = [2]
        xlen = [15]
        yloc = [4, 9, 14]
        ylen = [3, 2, 2]
        eloc = [2]
        elen = [15]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
        """
        x: ---- ---
        y: --- ---
        """
        xloc = [0, 10]
        xlen = [3, 3]
        yloc = [5, 15]
        ylen = [2, 2]
        eloc = [0, 5, 10, 15]
        elen = [3, 2, 3, 2]
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)

    def test_intindex_make_union(self):
        # Overlapping indices.
        a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32))
        b = IntIndex(5, np.array([0, 2], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32))
        assert res.equals(exp)

        # One side empty.
        a = IntIndex(5, np.array([], dtype=np.int32))
        b = IntIndex(5, np.array([0, 2], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 2], np.int32))
        assert res.equals(exp)

        # Both sides empty.
        a = IntIndex(5, np.array([], dtype=np.int32))
        b = IntIndex(5, np.array([], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([], np.int32))
        assert res.equals(exp)

        # Fully dense on both sides.
        a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
        b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32))
        res = a.make_union(b)
        exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32))
        assert res.equals(exp)

        # Mismatched lengths: the union is undefined.
        a = IntIndex(5, np.array([0, 1], dtype=np.int32))
        b = IntIndex(4, np.array([0, 1], dtype=np.int32))
        with pytest.raises(ValueError):
            a.make_union(b)
class TestSparseIndexIntersect(object):
    """Tests for intersect on BlockIndex and IntIndex representations."""

    def test_intersect(self):
        def _check_correct(a, b, expected):
            result = a.intersect(b)
            assert (result.equals(expected))

        def _check_length_exc(a, longer):
            # Intersecting indices of different total length must raise.
            pytest.raises(Exception, a.intersect, longer)

        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
            expected = BlockIndex(TEST_LENGTH, eloc, elen)
            longer_index = BlockIndex(TEST_LENGTH + 1, yloc, ylen)
            _check_correct(xindex, yindex, expected)
            _check_correct(xindex.to_int_index(), yindex.to_int_index(),
                           expected.to_int_index())
            _check_length_exc(xindex, longer_index)
            _check_length_exc(xindex.to_int_index(),
                              longer_index.to_int_index())

        if compat.is_platform_windows():
            pytest.skip("segfaults on win-64 when all tests are run")
        check_cases(_check_case)

    def test_intersect_empty(self):
        # Intersection with an empty index is the empty index, in both
        # integer and block representations.
        xindex = IntIndex(4, np.array([], dtype=np.int32))
        yindex = IntIndex(4, np.array([2, 3], dtype=np.int32))
        assert xindex.intersect(yindex).equals(xindex)
        assert yindex.intersect(xindex).equals(xindex)

        xindex = xindex.to_block_index()
        yindex = yindex.to_block_index()
        assert xindex.intersect(yindex).equals(xindex)
        assert yindex.intersect(xindex).equals(xindex)

    def test_intersect_identical(self):
        # An index intersected with itself is itself (both representations).
        cases = [IntIndex(5, np.array([1, 2], dtype=np.int32)),
                 IntIndex(5, np.array([0, 2, 4], dtype=np.int32)),
                 IntIndex(0, np.array([], dtype=np.int32)),
                 IntIndex(5, np.array([], dtype=np.int32))]
        for case in cases:
            assert case.intersect(case).equals(case)
            case = case.to_block_index()
            assert case.intersect(case).equals(case)
class TestSparseIndexCommon(object):
    """Behaviour common to the 'integer' and 'block' index kinds."""

    def test_int_internal(self):
        idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([2, 3], dtype=np.int32))

        # Empty index.
        idx = _make_index(4, np.array([], dtype=np.int32), kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([], dtype=np.int32))

        # Fully dense index.
        idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                          kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([0, 1, 2, 3], dtype=np.int32))

    def test_block_internal(self):
        idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([2], dtype=np.int32))

        # Empty index: no blocks.
        idx = _make_index(4, np.array([], dtype=np.int32), kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([], dtype=np.int32))

        # Fully dense index collapses to a single block.
        idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                          kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([0], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([4], dtype=np.int32))

        # A gap splits the index into two blocks.
        idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32),
                          kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 3
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([0, 2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([1, 2], dtype=np.int32))

    def test_lookup(self):
        # lookup maps a dense position to its sparse slot, or -1 if absent.
        for kind in ['integer', 'block']:
            idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind)
            assert idx.lookup(-1) == -1
            assert idx.lookup(0) == -1
            assert idx.lookup(1) == -1
            assert idx.lookup(2) == 0
            assert idx.lookup(3) == 1
            assert idx.lookup(4) == -1

            idx = _make_index(4, np.array([], dtype=np.int32), kind=kind)
            for i in range(-1, 5):
                assert idx.lookup(i) == -1

            idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                              kind=kind)
            assert idx.lookup(-1) == -1
            assert idx.lookup(0) == 0
            assert idx.lookup(1) == 1
            assert idx.lookup(2) == 2
            assert idx.lookup(3) == 3
            assert idx.lookup(4) == -1

            idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32),
                              kind=kind)
            assert idx.lookup(-1) == -1
            assert idx.lookup(0) == 0
            assert idx.lookup(1) == -1
            assert idx.lookup(2) == 1
            assert idx.lookup(3) == 2
            assert idx.lookup(4) == -1

    def test_lookup_array(self):
        # Vectorised variant of lookup over an array of dense positions.
        for kind in ['integer', 'block']:
            idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind)

            res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32))
            exp = np.array([-1, -1, 0], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

            res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32))
            exp = np.array([-1, 0, -1, 1], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

            idx = _make_index(4, np.array([], dtype=np.int32), kind=kind)
            res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32))
            exp = np.array([-1, -1, -1, -1], dtype=np.int32)

            idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                              kind=kind)
            res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32))
            exp = np.array([-1, 0, 2], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

            res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32))
            exp = np.array([-1, 2, 1, 3], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

            idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32),
                              kind=kind)
            res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32))
            exp = np.array([1, -1, 2, 0], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

            res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32))
            exp = np.array([-1, -1, 1, -1], dtype=np.int32)
            tm.assert_numpy_array_equal(res, exp)

    def test_lookup_basics(self):
        def _check(index):
            # Index covers dense positions [5, 8) and [12, 18).
            assert (index.lookup(0) == -1)
            assert (index.lookup(5) == 0)
            assert (index.lookup(7) == 2)
            assert (index.lookup(8) == -1)
            assert (index.lookup(9) == -1)
            assert (index.lookup(10) == -1)
            assert (index.lookup(11) == -1)
            assert (index.lookup(12) == 3)
            assert (index.lookup(17) == 8)
            assert (index.lookup(18) == -1)

        bindex = BlockIndex(20, [5, 12], [3, 6])
        iindex = bindex.to_int_index()

        _check(bindex)
        _check(iindex)

    # corner cases
class TestBlockIndex(object):
    """BlockIndex-specific construction, validation and conversion tests."""

    def test_block_internal(self):
        idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([2], dtype=np.int32))

        # Empty index: no blocks.
        idx = _make_index(4, np.array([], dtype=np.int32), kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([], dtype=np.int32))

        # Fully dense index collapses to a single block.
        idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                          kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([0], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([4], dtype=np.int32))

        # A gap splits the index into two blocks.
        idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind='block')
        assert isinstance(idx, BlockIndex)
        assert idx.npoints == 3
        tm.assert_numpy_array_equal(idx.blocs,
                                    np.array([0, 2], dtype=np.int32))
        tm.assert_numpy_array_equal(idx.blengths,
                                    np.array([1, 2], dtype=np.int32))

    def test_make_block_boundary(self):
        # Alternating points (stride 2) degenerate to one block per point.
        for i in [5, 10, 100, 101]:
            idx = _make_index(i, np.arange(0, i, 2, dtype=np.int32),
                              kind='block')
            exp = np.arange(0, i, 2, dtype=np.int32)
            tm.assert_numpy_array_equal(idx.blocs, exp)
            tm.assert_numpy_array_equal(idx.blengths,
                                        np.ones(len(exp), dtype=np.int32))

    def test_equals(self):
        index = BlockIndex(10, [0, 4], [2, 5])

        assert index.equals(index)
        assert not index.equals(BlockIndex(10, [0, 4], [2, 6]))

    def test_check_integrity(self):
        locs = []
        lengths = []

        # 0-length OK
        # TODO: index variables are not used...is that right?
        index = BlockIndex(0, locs, lengths)  # noqa

        # also OK even though empty
        index = BlockIndex(1, locs, lengths)  # noqa

        # block extend beyond end
        pytest.raises(Exception, BlockIndex, 10, [5], [10])

        # block overlap
        pytest.raises(Exception, BlockIndex, 10, [2, 5], [5, 3])

    def test_to_int_index(self):
        # Blocks [0, 4) and [10, 16) expand to the explicit positions.
        locs = [0, 10]
        lengths = [4, 6]
        exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]

        block = BlockIndex(20, locs, lengths)
        dense = block.to_int_index()

        tm.assert_numpy_array_equal(dense.indices,
                                    np.array(exp_inds, dtype=np.int32))

    def test_to_block_index(self):
        # Converting a BlockIndex to block form is the identity.
        index = BlockIndex(10, [0, 5], [4, 5])
        assert index.to_block_index() is index
class TestIntIndex(object):
    """IntIndex-specific construction, validation and conversion tests."""

    def test_check_integrity(self):
        # Too many indices than specified in self.length
        msg = "Too many indices"
        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=1, indices=[1, 2, 3])

        # No index can be negative.
        msg = "No index can be less than zero"
        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, -2, 3])

        # No index can be negative.
        msg = "No index can be less than zero"
        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, -2, 3])

        # All indices must be less than the length.
        msg = "All indices must be less than the length"
        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, 2, 5])

        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, 2, 6])

        # Indices must be strictly ascending.
        msg = "Indices must be strictly increasing"
        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, 3, 2])

        with tm.assert_raises_regex(ValueError, msg):
            IntIndex(length=5, indices=[1, 3, 3])

    def test_int_internal(self):
        idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 2
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([2, 3], dtype=np.int32))

        # Empty index.
        idx = _make_index(4, np.array([], dtype=np.int32), kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 0
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([], dtype=np.int32))

        # Fully dense index.
        idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32),
                          kind='integer')
        assert isinstance(idx, IntIndex)
        assert idx.npoints == 4
        tm.assert_numpy_array_equal(idx.indices,
                                    np.array([0, 1, 2, 3], dtype=np.int32))

    def test_equals(self):
        index = IntIndex(10, [0, 1, 2, 3, 4])
        assert index.equals(index)
        assert not index.equals(IntIndex(10, [0, 1, 2, 3]))

    def test_to_block_index(self):
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)

            # see if survive the round trip
            xbindex = xindex.to_int_index().to_block_index()
            ybindex = yindex.to_int_index().to_block_index()
            assert isinstance(xbindex, BlockIndex)
            assert xbindex.equals(xindex)
            assert ybindex.equals(yindex)

        check_cases(_check_case)

    def test_to_int_index(self):
        # Converting an IntIndex to integer form is the identity.
        index = IntIndex(10, [2, 3, 4, 5, 6])
        assert index.to_int_index() is index
class TestSparseOperators(object):
    """Check the cython sparse arithmetic kernels against equivalent dense
    Series arithmetic (test methods are generated below via make_optestf)."""

    def _op_tests(self, sparse_op, python_op):
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)

            xdindex = xindex.to_int_index()
            ydindex = yindex.to_int_index()

            # Distinct value ramps and fill values so mismatches are visible.
            x = np.arange(xindex.npoints) * 10. + 1
            y = np.arange(yindex.npoints) * 100. + 1
            xfill = 0
            yfill = 2

            # The block-index and int-index kernels must agree.
            result_block_vals, rb_index, bfill = sparse_op(x, xindex, xfill, y,
                                                           yindex, yfill)
            result_int_vals, ri_index, ifill = sparse_op(x, xdindex, xfill, y,
                                                         ydindex, yfill)

            assert rb_index.to_int_index().equals(ri_index)
            tm.assert_numpy_array_equal(result_block_vals, result_int_vals)
            assert bfill == ifill

            # check versus Series...
            xseries = Series(x, xdindex.indices)
            xseries = xseries.reindex(np.arange(TEST_LENGTH)).fillna(xfill)

            yseries = Series(y, ydindex.indices)
            yseries = yseries.reindex(np.arange(TEST_LENGTH)).fillna(yfill)

            series_result = python_op(xseries, yseries)
            series_result = series_result.reindex(ri_index.indices)

            tm.assert_numpy_array_equal(result_block_vals,
                                        series_result.values)
            tm.assert_numpy_array_equal(result_int_vals, series_result.values)

        check_cases(_check_case)
# too cute? oh but how I abhor code duplication
check_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']


def make_optestf(op):
    """Build a test method pairing the cython sparse kernel for *op* with
    its pure-Python operator counterpart."""
    def f(self):
        sparse_op = getattr(splib, 'sparse_%s_float64' % op)
        python_op = getattr(operator, op)
        self._op_tests(sparse_op, python_op)
    f.__name__ = 'test_%s' % op
    return f


# Attach one generated test method per arithmetic op to TestSparseOperators.
for op in check_ops:
    g = make_optestf(op)
    setattr(TestSparseOperators, g.__name__, g)
    del g
| bsd-3-clause |
hazelnusse/sympy-old | examples/intermediate/sample.py | 11 | 3354 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from numpy import repeat, arange, empty, ndarray, array
from sympy import Symbol, Basic, Real, Rational, I, sympify
def sample2d(f, x_args):
    """
    Samples a 2d function f over specified intervals and returns two
    arrays (X, Y) suitable for plotting with matlab (matplotlib)
    syntax. See examples\\mplot2d.py.

    f is a function of one variable, such as x**2.
    x_args is an interval given in the form (var, min, max, n)
    """
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not swallowed.
    try:
        f = sympify(f)
    except Exception:
        raise ValueError("f could not be interpretted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
    except Exception:
        raise ValueError("x_args must be a tuple of the form (var, min, max, n)")

    # n subintervals -> n + 1 sample points over the closed interval.
    x_l = float(x_max - x_min)
    x_d = x_l / float(x_n)
    X = arange(float(x_min), float(x_max) + x_d, x_d)

    Y = empty(len(X))
    for i in range(len(X)):
        try:
            Y[i] = float(f.subs(x, X[i]))
        except Exception:
            # Mark non-evaluable (e.g. complex) points as NaN; assigning
            # None into a float array is rejected by modern numpy.
            Y[i] = float('nan')
    return X, Y
def sample3d(f, x_args, y_args):
    """
    Samples a 3d function f over specified intervals and returns three
    2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
    syntax. See examples\mplot3d.py.

    f is a function of two variables, such as x**2 + y**2.
    x_args and y_args are intervals given in the form (var, min, max, n)

    Raises ValueError if *f* cannot be sympified or the interval tuples
    do not unpack into four values each.
    """
    x, x_min, x_max, x_n = None, None, None, None
    y, y_min, y_max, y_n = None, None, None, None
    try:
        f = sympify(f)
    except Exception:
        # Narrowed from a bare except: don't swallow KeyboardInterrupt et al.
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
        y, y_min, y_max, y_n = y_args
    except Exception:
        raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
    # Sample each axis on its closed interval, n steps of width *_d.
    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    x_a = arange(float(x_min), float(x_max)+x_d, x_d)
    y_l = float(y_max - y_min)
    y_d = y_l/float(y_n)
    y_a = arange(float(y_min), float(y_max)+y_d, y_d)
    def meshgrid(x, y):
        """
        Taken from matplotlib.mlab.meshgrid.
        """
        x = array(x)
        y = array(y)
        numRows, numCols = len(y), len(x)
        x.shape = 1, numCols
        X = repeat(x, numRows, 0)
        y.shape = numRows, 1
        Y = repeat(y, numCols, 1)
        return X, Y
    X, Y = meshgrid(x_a, y_a)
    Z = ndarray((len(X), len(X[0])))
    for j in range(len(X)):
        for k in range(len(X[0])):
            try:
                Z[j][k] = float( f.subs(x, X[j][k]).subs(y, Y[j][k]) )
            except Exception:
                # Point could not be evaluated numerically; note sample2d
                # stores NaN in the analogous case while this stores 0.
                Z[j][k] = 0
    return X, Y, Z
def sample(f, *var_args):
    """
    Samples a 2d or 3d function over specified intervals and returns
    a dataset suitable for plotting with matlab (matplotlib) syntax.
    Wrapper for sample2d and sample3d.

    f is a function of one or two variables, such as x**2.
    var_args are intervals for each variable given in the form (var, min, max, n)
    """
    n_intervals = len(var_args)
    if n_intervals == 1:
        return sample2d(f, var_args[0])
    if n_intervals == 2:
        return sample3d(f, var_args[0], var_args[1])
    raise ValueError("Only 2d and 3d sampling are supported at this time.")
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    # test metric smacof using the data of "Modern Multidimensional Scaling",
    # Borg & Groenen, p 154
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    init_pos = np.array([[-.266, -.539],
                         [.451, .252],
                         [.016, -.238],
                         [-.200, .524]])
    embedding, _ = mds.smacof(dissim, init=init_pos, n_components=2,
                              max_iter=1, n_init=1)
    expected = np.array([[-1.415, -2.471],
                         [1.633, 1.107],
                         [.249, -.067],
                         [-.468, 1.431]])
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    # Not symmetric similarity matrix:
    asym = np.array([[0, 5, 9, 4],
                     [5, 0, 2, 2],
                     [3, 2, 0, 1],
                     [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, asym)
    # Not squared similarity matrix:
    not_square = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, not_square)
    # init not None and not correct format:
    sym = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, sym, init=bad_init, n_init=1)
def test_MDS():
    # Smoke test: non-metric MDS accepts a precomputed dissimilarity matrix.
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    estimator = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    estimator.fit(dissim)
| bsd-3-clause |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email mdroe@stsci.edu, but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
    """get_unicode_index(symbol) -> integer

    Return the integer index (from the Unicode table) of symbol. *symbol*
    can be a single unicode character, a TeX command (i.e. r'\pi'), or a
    Type1 symbol name (i.e. 'phi').

    Raises ValueError if the symbol cannot be mapped to a code point.
    """
    # From UTF #25: U+2212 minus sign is the preferred
    # representation of the unary and binary minus sign rather than
    # the ASCII-derived U+002D hyphen-minus, because minus sign is
    # unambiguous and because it is rendered with a more desirable
    # length, usually longer than a hyphen.
    if symbol == '-':
        return 0x2212
    try:# This will succeed if symbol is a single unicode char
        return ord(symbol)
    except TypeError:
        pass
    try:# Is symbol a TeX symbol (i.e. \alpha)
        return tex2uni[symbol.strip("\\")]
    except KeyError:
        message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
        # Call form of raise: works on both Python 2 and 3, unlike the
        # old "raise ValueError, message" statement syntax.
        raise ValueError(message)
class MathtextBackend(object):
    """
    The base class for the mathtext backend-specific code. The
    purpose of :class:`MathtextBackend` subclasses is to interface
    between mathtext and a specific matplotlib graphics backend.

    Subclasses need to override the following:

      - :meth:`render_glyph`
      - :meth:`render_rect_filled`
      - :meth:`get_results`

    And optionally, if you need to use a Freetype hinting style:

      - :meth:`get_hinting_type`
    """
    def __init__(self):
        # Back-reference to the owning Fonts object; Fonts.__init__
        # fills this in to make the two objects doubly-linked.
        self.fonts_object = None
    def set_canvas_size(self, w, h, d):
        'Dimension the drawing canvas'
        self.width = w
        self.height = h
        self.depth = d
    def render_glyph(self, ox, oy, info):
        """
        Draw a glyph described by *info* to the reference point (*ox*,
        *oy*).
        """
        raise NotImplementedError()
    def render_rect_filled(self, x1, y1, x2, y2):
        """
        Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).

        This is the hook name that :class:`Fonts` and all concrete
        backends actually use.
        """
        raise NotImplementedError()
    def render_filled_rect(self, x1, y1, x2, y2):
        """
        Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).

        Deprecated misspelling kept for backward compatibility: callers
        and subclasses use :meth:`render_rect_filled`.
        """
        raise NotImplementedError()
    def get_results(self, box):
        """
        Return a backend-specific tuple to return to the backend after
        all processing is done.
        """
        raise NotImplementedError()
    def get_hinting_type(self):
        """
        Get the Freetype hinting type to use with this particular
        backend.
        """
        return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
    """
    A backend whose only purpose is to get a precise bounding box.
    Only required for the Agg backend.
    """
    def __init__(self, real_backend):
        MathtextBackend.__init__(self)
        # Running [xmin, ymin, xmax, ymax] accumulated while shipping.
        self.bbox = [0, 0, 0, 0]
        # The backend that will do the actual drawing on the second pass.
        self.real_backend = real_backend
    def _update_bbox(self, x1, y1, x2, y2):
        # Grow the accumulated bbox to include the given rectangle.
        self.bbox = [min(self.bbox[0], x1),
                     min(self.bbox[1], y1),
                     max(self.bbox[2], x2),
                     max(self.bbox[3], y2)]
    def render_glyph(self, ox, oy, info):
        # Measuring pass: record the glyph's ink rectangle, draw nothing.
        self._update_bbox(ox + info.metrics.xmin,
                          oy - info.metrics.ymax,
                          ox + info.metrics.xmax,
                          oy - info.metrics.ymin)
    def render_rect_filled(self, x1, y1, x2, y2):
        self._update_bbox(x1, y1, x2, y2)
    def get_results(self, box):
        # First ship() pass accumulates the bbox through the render_*
        # callbacks above; then rendering is handed to the real backend
        # and ship() runs again, offset so the content fits exactly.
        orig_height = box.height
        orig_depth = box.depth
        ship(0, 0, box)
        bbox = self.bbox
        # One pixel of padding on every side.
        bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
        self._switch_to_real_backend()
        self.fonts_object.set_canvas_size(
            bbox[2] - bbox[0],
            (bbox[3] - bbox[1]) - orig_depth,
            (bbox[3] - bbox[1]) - orig_height)
        ship(-bbox[0], -bbox[1], box)
        return self.fonts_object.get_results(box)
    def get_hinting_type(self):
        return self.real_backend.get_hinting_type()
    def _switch_to_real_backend(self):
        # Re-link the Fonts object to the real backend and hand it the
        # measured origin so its output can be positioned correctly.
        self.fonts_object.mathtext_backend = self.real_backend
        self.real_backend.fonts_object = self.fonts_object
        self.real_backend.ox = self.bbox[0]
        self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
    """
    Render glyphs and rectangles to an FTImage buffer, which is later
    transferred to the Agg image by the Agg backend.
    """
    def __init__(self):
        # (ox, oy) are filled in by MathtextBackendBbox when it hands
        # rendering over to this backend.
        self.ox = 0
        self.oy = 0
        self.image = None
        MathtextBackend.__init__(self)
    def set_canvas_size(self, w, h, d):
        MathtextBackend.set_canvas_size(self, w, h, d)
        self.image = FT2Image(ceil(w), ceil(h + d))
    def render_glyph(self, ox, oy, info):
        info.font.draw_glyph_to_bitmap(
            self.image, ox, oy - info.metrics.ymax, info.glyph)
    def render_rect_filled(self, x1, y1, x2, y2):
        # Spans of less than a pixel (e.g. thin rules) are drawn as a
        # single-pixel row centred on the requested span.
        height = max(int(y2 - y1) - 1, 0)
        if height == 0:
            center = (y2 + y1) / 2.0
            y = int(center - (height + 1) / 2.0)
        else:
            y = int(y1)
        self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
    def get_results(self, box):
        # Tuple layout consumed by the Agg backend.
        return (self.ox,
                self.oy,
                self.width,
                self.height + self.depth,
                self.depth,
                self.image,
                self.fonts_object.get_used_characters())
    def get_hinting_type(self):
        return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
    """Factory: the Agg renderer wrapped in a bounding-box measuring pass."""
    return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
    # Same as the Agg renderer except get_results returns just the
    # raster image and its depth, with no backend bookkeeping.
    def get_results(self, box):
        return self.image, self.depth
def MathtextBackendBitmap():
    """
    A backend to generate standalone mathtext images. No additional
    matplotlib backend is required.
    """
    # Wrap in the bbox pass so the output image is tightly sized.
    return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
    """
    Store information to write a mathtext rendering to the PostScript
    backend.
    """
    def __init__(self):
        # Accumulates the PostScript program text.
        self.pswriter = StringIO()
        # (postscript_name, fontsize) last selected, so consecutive
        # glyphs in the same font emit one findfont/scalefont block.
        self.lastfont = None
    def render_glyph(self, ox, oy, info):
        # Flip to PostScript's y-up coordinate system.
        oy = self.height - oy + info.offset
        postscript_name = info.postscript_name
        fontsize = info.fontsize
        symbol_name = info.symbol_name
        if (postscript_name, fontsize) != self.lastfont:
            ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
            self.lastfont = postscript_name, fontsize
            self.pswriter.write(ps)
        ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
        self.pswriter.write(ps)
    def render_rect_filled(self, x1, y1, x2, y2):
        # rectfill takes x, y (flipped), width, height.
        ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
        self.pswriter.write(ps)
    def get_results(self, box):
        ship(0, -self.depth, box)
        #print self.depth
        return (self.width,
                self.height + self.depth,
                self.depth,
                self.pswriter,
                self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
    """
    Accumulate glyphs and rectangles for the PDF backend to draw later.
    """
    def __init__(self):
        self.glyphs = []
        self.rects = []
    def render_glyph(self, ox, oy, info):
        # PDF measures y from the bottom of the canvas.
        flipped_y = self.height - oy + info.offset
        entry = (ox, flipped_y, info.font.fname, info.fontsize,
                 info.num, info.symbol_name)
        self.glyphs.append(entry)
    def render_rect_filled(self, x1, y1, x2, y2):
        w, h = x2 - x1, y2 - y1
        self.rects.append((x1, self.height - y2, w, h))
    def get_results(self, box):
        ship(0, -self.depth, box)
        total_height = self.height + self.depth
        return (self.width,
                total_height,
                self.depth,
                self.glyphs,
                self.rects,
                self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
    """
    Accumulate glyphs and rectangles for the SVG backend to draw later.
    """
    def __init__(self):
        self.svg_glyphs = []
        self.svg_rects = []
    def render_glyph(self, ox, oy, info):
        # SVG measures y from the bottom of the canvas.
        flipped_y = self.height - oy + info.offset
        glyph_text = unichr(info.num)
        record = (info.font, info.fontsize, glyph_text, ox, flipped_y,
                  info.metrics)
        self.svg_glyphs.append(record)
    def render_rect_filled(self, x1, y1, x2, y2):
        w, h = x2 - x1, y2 - y1
        self.svg_rects.append((x1, self.height - y1 + 1, w, h))
    def get_results(self, box):
        ship(0, -self.depth, box)
        svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
                             svg_rects = self.svg_rects)
        return (self.width,
                self.height + self.depth,
                self.depth,
                svg_elements,
                self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
    """
    Accumulate glyphs and rectangles for the Cairo backend to draw later.
    """
    def __init__(self):
        self.glyphs = []
        self.rects = []
    def render_glyph(self, ox, oy, info):
        # Cairo positions glyphs relative to the canvas top edge.
        shifted_y = oy - info.offset - self.height
        glyph_text = unichr(info.num)
        self.glyphs.append(
            (info.font, info.fontsize, glyph_text, ox, shifted_y))
    def render_rect_filled(self, x1, y1, x2, y2):
        w, h = x2 - x1, y2 - y1
        self.rects.append((x1, y1 - self.height, w, h))
    def get_results(self, box):
        ship(0, -self.depth, box)
        return (self.width,
                self.height + self.depth,
                self.depth,
                self.glyphs,
                self.rects)
class Fonts(object):
    """
    An abstract base class for a system of fonts to use for mathtext.
    The class must be able to take symbol keys and font file names and
    return the character metrics. It also delegates to a backend class
    to do the actual drawing.
    """
    def __init__(self, default_font_prop, mathtext_backend):
        """
        *default_font_prop*: A
        :class:`~matplotlib.font_manager.FontProperties` object to use
        for the default non-math font, or the base font for Unicode
        (generic) font rendering.
        *mathtext_backend*: A subclass of :class:`MathTextBackend`
        used to delegate the actual rendering.
        """
        self.default_font_prop = default_font_prop
        self.mathtext_backend = mathtext_backend
        # Make these classes doubly-linked
        self.mathtext_backend.fonts_object = self
        # Maps a font-file stat key to (realpath, set of glyph codes);
        # lets backends subset the fonts they embed.
        self.used_characters = {}
    def destroy(self):
        """
        Fix any cyclical references before the object is about
        to be destroyed.
        """
        self.used_characters = None
    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        """
        Get the kerning distance for font between *sym1* and *sym2*.
        *fontX*: one of the TeX font names::
            tt, it, rm, cal, sf, bf or default (non-math)
        *fontclassX*: TODO
        *symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
        *fontsizeX*: the fontsize in points
        *dpi*: the current dots-per-inch
        """
        # Base implementation: no kerning.
        return 0.
    def get_metrics(self, font, font_class, sym, fontsize, dpi):
        """
        *font*: one of the TeX font names::
            tt, it, rm, cal, sf, bf or default (non-math)
        *font_class*: TODO
        *sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
        *fontsize*: font size in points
        *dpi*: current dots-per-inch
        Returns an object with the following attributes:
        - *advance*: The advance distance (in points) of the glyph.
        - *height*: The height of the glyph in points.
        - *width*: The width of the glyph in points.
        - *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
        - *iceberg* - the distance from the baseline to the top of
          the glyph. This corresponds to TeX's definition of
          "height".
        """
        info = self._get_info(font, font_class, sym, fontsize, dpi)
        return info.metrics
    def set_canvas_size(self, w, h, d):
        """
        Set the size of the buffer used to render the math expression.
        Only really necessary for the bitmap backends.
        """
        self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
        self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
    def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
        """
        Draw a glyph at
        - *ox*, *oy*: position
        - *facename*: One of the TeX face names
        - *font_class*:
        - *sym*: TeX symbol name or single character
        - *fontsize*: fontsize in points
        - *dpi*: The dpi to draw at.
        """
        info = self._get_info(facename, font_class, sym, fontsize, dpi)
        # Record the glyph for font subsetting before delegating the draw.
        realpath, stat_key = get_realpath_and_stat(info.font.fname)
        used_characters = self.used_characters.setdefault(
            stat_key, (realpath, set()))
        used_characters[1].add(info.num)
        self.mathtext_backend.render_glyph(ox, oy, info)
    def render_rect_filled(self, x1, y1, x2, y2):
        """
        Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
        """
        self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
    def get_xheight(self, font, fontsize, dpi):
        """
        Get the xheight for the given *font* and *fontsize*.
        """
        raise NotImplementedError()
    def get_underline_thickness(self, font, fontsize, dpi):
        """
        Get the line thickness that matches the given font. Used as a
        base unit for drawing lines such as in a fraction or radical.
        """
        raise NotImplementedError()
    def get_used_characters(self):
        """
        Get the set of characters that were used in the math
        expression. Used by backends that need to subset fonts so
        they know which glyphs to include.
        """
        return self.used_characters
    def get_results(self, box):
        """
        Get the data needed by the backend to render the math
        expression. The return value is backend-specific.
        """
        return self.mathtext_backend.get_results(box)
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        """
        Override if your font provides multiple sizes of the same
        symbol. Should return a list of symbols matching *sym* in
        various sizes. The expression renderer will select the most
        appropriate size for a given situation from this list.
        """
        return [(fontname, sym)]
class TruetypeFonts(Fonts):
    """
    A generic base class for all font setups that use Truetype fonts
    (through FT2Font).
    """
    class CachedFont:
        # Wraps an FT2Font with its charmap (code point -> glyph index)
        # and the inverse glyphmap (glyph index -> code point).
        def __init__(self, font):
            self.font = font
            self.charmap = font.get_charmap()
            self.glyphmap = dict(
                [(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
        def __repr__(self):
            return repr(self.font)
    def __init__(self, default_font_prop, mathtext_backend):
        Fonts.__init__(self, default_font_prop, mathtext_backend)
        # Cache of _get_info results keyed by (font, class, sym, size, dpi).
        self.glyphd = {}
        self._fonts = {}
        filename = findfont(default_font_prop)
        default_font = self.CachedFont(FT2Font(str(filename)))
        self._fonts['default'] = default_font
    def destroy(self):
        self.glyphd = None
        Fonts.destroy(self)
    def _get_font(self, font):
        """Load (and cache) the font for TeX font name or file basename *font*."""
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            basename = font
        cached_font = self._fonts.get(basename)
        if cached_font is None:
            font = FT2Font(basename)
            cached_font = self.CachedFont(font)
            # Cache under the basename and both spellings of the
            # PostScript name so later lookups by any of them hit.
            self._fonts[basename] = cached_font
            self._fonts[font.postscript_name] = cached_font
            self._fonts[font.postscript_name.lower()] = cached_font
        return cached_font
    def _get_offset(self, cached_font, glyph, fontsize, dpi):
        # cmex10 glyphs are vertically offset; values are FreeType 26.6
        # fixed-point (1/64 pixel) units.
        if cached_font.font.postscript_name == 'Cmex10':
            return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
        return 0.
    def _get_info(self, fontname, font_class, sym, fontsize, dpi):
        """Load and cache the font, metrics and glyph for one symbol."""
        key = fontname, font_class, sym, fontsize, dpi
        bunch = self.glyphd.get(key)
        if bunch is not None:
            return bunch
        cached_font, num, symbol_name, fontsize, slanted = \
            self._get_glyph(fontname, font_class, sym, fontsize)
        font = cached_font.font
        font.set_size(fontsize, dpi)
        glyph = font.load_char(
            num,
            flags=self.mathtext_backend.get_hinting_type())
        # bbox values are 26.6 fixed-point; convert to pixels.
        xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
        offset = self._get_offset(cached_font, glyph, fontsize, dpi)
        metrics = Bunch(
            # linearHoriAdvance is 16.16 fixed-point.
            advance = glyph.linearHoriAdvance/65536.0,
            height = glyph.height/64.0,
            width = glyph.width/64.0,
            xmin = xmin,
            xmax = xmax,
            ymin = ymin+offset,
            ymax = ymax+offset,
            # iceberg is the equivalent of TeX's "height"
            iceberg = glyph.horiBearingY/64.0 + offset,
            slanted = slanted
            )
        result = self.glyphd[key] = Bunch(
            font = font,
            fontsize = fontsize,
            postscript_name = font.postscript_name,
            metrics = metrics,
            symbol_name = symbol_name,
            num = num,
            glyph = glyph,
            offset = offset
            )
        return result
    def get_xheight(self, font, fontsize, dpi):
        cached_font = self._get_font(font)
        cached_font.font.set_size(fontsize, dpi)
        pclt = cached_font.font.get_sfnt_table('pclt')
        if pclt is None:
            # Some fonts don't store the xHeight, so we do a poor man's xHeight
            metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
            return metrics.iceberg
        xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
        return xHeight
    def get_underline_thickness(self, font, fontsize, dpi):
        # This function used to grab underline thickness from the font
        # metrics, but that information is just too un-reliable, so it
        # is now hardcoded.
        return ((0.75 / 12.0) * fontsize * dpi) / 72.0
    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        # Real kerning only applies within a single font at one size;
        # otherwise fall back to the base class (which returns 0).
        if font1 == font2 and fontsize1 == fontsize2:
            info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
            info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
            font = info1.font
            return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
        return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
                              font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
    """
    Use the Bakoma TrueType fonts for rendering.
    Symbols are strewn about a number of font files, each of which has
    its own proprietary 8-bit encoding.
    """
    # TeX font name -> Computer Modern font file basename.
    _fontmap = { 'cal' : 'cmsy10',
                 'rm' : 'cmr10',
                 'tt' : 'cmtt10',
                 'it' : 'cmmi10',
                 'bf' : 'cmb10',
                 'sf' : 'cmss10',
                 'ex' : 'cmex10'
                 }
    fontmap = {}
    def __init__(self, *args, **kwargs):
        # Glyphs missing from the Bakoma set fall back to STIX.
        self._stix_fallback = StixFonts(*args, **kwargs)
        TruetypeFonts.__init__(self, *args, **kwargs)
        # fontmap is a class attribute, so resolve file paths only once.
        if not len(self.fontmap):
            for key, val in self._fontmap.iteritems():
                fullpath = findfont(val)
                self.fontmap[key] = fullpath
                self.fontmap[val] = fullpath
    _slanted_symbols = set(r"\int \oint".split())
    def _get_glyph(self, fontname, font_class, sym, fontsize):
        """Resolve *sym* to (cached_font, glyph num, name, size, slanted)."""
        symbol_name = None
        if fontname in self.fontmap and sym in latex_to_bakoma:
            basename, num = latex_to_bakoma[sym]
            slanted = (basename == "cmmi10") or sym in self._slanted_symbols
            try:
                cached_font = self._get_font(basename)
            except RuntimeError:
                pass
            else:
                symbol_name = cached_font.font.get_glyph_name(num)
                num = cached_font.glyphmap[num]
        elif len(sym) == 1:
            slanted = (fontname == "it")
            try:
                cached_font = self._get_font(fontname)
            except RuntimeError:
                pass
            else:
                num = ord(sym)
                gid = cached_font.charmap.get(num)
                if gid is not None:
                    symbol_name = cached_font.font.get_glyph_name(
                        cached_font.charmap[num])
        if symbol_name is None:
            # Not found in any Bakoma font: delegate to STIX.
            return self._stix_fallback._get_glyph(
                fontname, font_class, sym, fontsize)
        return cached_font, num, symbol_name, fontsize, slanted
    # The Bakoma fonts contain many pre-sized alternatives for the
    # delimiters. The AutoSizedChar class will use these alternatives
    # and select the best (closest sized) glyph.
    _size_alternatives = {
        '(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
               ('ex', '\xb5'), ('ex', '\xc3')],
        ')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
               ('ex', '\xb6'), ('ex', '\x21')],
        '{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
               ('ex', '\xbd'), ('ex', '\x28')],
        '}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
               ('ex', '\xbe'), ('ex', '\x29')],
        # The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
        '[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
               ('ex', '\x22')],
        ']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
               ('ex', '\x23')],
        r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
                      ('ex', '\xb9'), ('ex', '\x24')],
        r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
                      ('ex', '\xba'), ('ex', '\x25')],
        r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
                     ('ex', '\xbb'), ('ex', '\x26')],
        r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
                     ('ex', '\xbc'), ('ex', '\x27')],
        r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
                      ('ex', '\xbf'), ('ex', '\x2a')],
        r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
                      ('ex', '\xc0'), ('ex', '\x2b')],
        r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
                        ('ex', '\x72'), ('ex', '\x73')],
        r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
                        ('ex', '\xc2'), ('ex', '\x2d')],
        r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
                ('ex', '\xcb'), ('ex', '\x2c')],
        r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
                       ('ex', '\x64')],
        r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
                        ('ex', '\x67')],
        r'<' : [('cal', 'h'), ('ex', 'D')],
        r'>' : [('cal', 'i'), ('ex', 'E')]
        }
    # NOTE(review): '\rightparent' below looks like a typo for
    # '\rightparen' — verify against the parser's alias names before
    # changing, since it is a runtime dictionary key.
    for alias, target in [('\leftparen', '('),
                          ('\rightparent', ')'),
                          ('\leftbrace', '{'),
                          ('\rightbrace', '}'),
                          ('\leftbracket', '['),
                          ('\rightbracket', ']')]:
        _size_alternatives[alias] = _size_alternatives[target]
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
    """
    An abstract base class for handling Unicode fonts.
    While some reasonably complete Unicode fonts (such as DejaVu) may
    work in some situations, the only Unicode font I'm aware of with a
    complete set of math symbols is STIX.
    This class will "fallback" on the Bakoma fonts when a required
    symbol can not be found in the font.
    """
    fontmap = {}
    use_cmex = True
    def __init__(self, *args, **kwargs):
        # This must come first so the backend's owner is set correctly
        if rcParams['mathtext.fallback_to_cm']:
            self.cm_fallback = BakomaFonts(*args, **kwargs)
        else:
            self.cm_fallback = None
        TruetypeFonts.__init__(self, *args, **kwargs)
        # fontmap is a class attribute, so resolve file paths only once.
        if not len(self.fontmap):
            for texfont in "cal rm tt it bf sf".split():
                prop = rcParams['mathtext.' + texfont]
                font = findfont(prop)
                self.fontmap[texfont] = font
            prop = FontProperties('cmex10')
            font = findfont(prop)
            self.fontmap['ex'] = font
    _slanted_symbols = set(r"\int \oint".split())
    def _map_virtual_font(self, fontname, font_class, uniindex):
        # Hook for subclasses (e.g. StixFonts) to remap code points into
        # style-specific virtual fonts; identity mapping here.
        return fontname, uniindex
    def _get_glyph(self, fontname, font_class, sym, fontsize):
        """Resolve *sym* to (cached_font, uniindex, name, size, slanted)."""
        found_symbol = False
        if self.use_cmex:
            uniindex = latex_to_cmex.get(sym)
            if uniindex is not None:
                fontname = 'ex'
                found_symbol = True
        if not found_symbol:
            try:
                uniindex = get_unicode_index(sym)
                found_symbol = True
            except ValueError:
                uniindex = ord('?')
                warn("No TeX to unicode mapping for '%s'" %
                     sym.encode('ascii', 'backslashreplace'),
                     MathTextWarning)
        fontname, uniindex = self._map_virtual_font(
            fontname, font_class, uniindex)
        # Only characters in the "Letter" class should be italicized in 'it'
        # mode. Greek capital letters should be Roman.
        if found_symbol:
            new_fontname = fontname
            if fontname == 'it':
                if uniindex < 0x10000:
                    unistring = unichr(uniindex)
                    if (not unicodedata.category(unistring)[0] == "L"
                        or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
                        new_fontname = 'rm'
            slanted = (new_fontname == 'it') or sym in self._slanted_symbols
            found_symbol = False
            try:
                cached_font = self._get_font(new_fontname)
            except RuntimeError:
                pass
            else:
                try:
                    glyphindex = cached_font.charmap[uniindex]
                    found_symbol = True
                except KeyError:
                    pass
        if not found_symbol:
            if self.cm_fallback:
                # Try Computer Modern (Bakoma) before giving up.
                warn("Substituting with a symbol from Computer Modern.",
                     MathTextWarning)
                return self.cm_fallback._get_glyph(
                    fontname, 'it', sym, fontsize)
            else:
                if fontname == 'it' and isinstance(self, StixFonts):
                    return self._get_glyph('rm', font_class, sym, fontsize)
                warn("Font '%s' does not have a glyph for '%s'" %
                     (fontname, sym.encode('ascii', 'backslashreplace')),
                     MathTextWarning)
                warn("Substituting with a dummy symbol.", MathTextWarning)
                fontname = 'rm'
                new_fontname = fontname
                cached_font = self._get_font(fontname)
                uniindex = 0xA4 # currency character, for lack of anything better
                glyphindex = cached_font.charmap[uniindex]
                slanted = False
        symbol_name = cached_font.font.get_glyph_name(glyphindex)
        return cached_font, uniindex, symbol_name, fontsize, slanted
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        if self.cm_fallback:
            return self.cm_fallback.get_sized_alternatives_for_symbol(
                fontname, sym)
        return [(fontname, sym)]
class StixFonts(UnicodeFonts):
    """
    A font handling class for the STIX fonts.
    In addition to what UnicodeFonts provides, this class:
    - supports "virtual fonts" which are complete alpha numeric
      character sets with different font styles at special Unicode
      code points, such as "Blackboard".
    - handles sized alternative characters for the STIXSizeX fonts.
    """
    # TeX font names and size indices (0-5) -> fontconfig patterns.
    _fontmap = { 'rm' : 'STIXGeneral',
                 'it' : 'STIXGeneral:italic',
                 'bf' : 'STIXGeneral:weight=bold',
                 'nonunirm' : 'STIXNonUnicode',
                 'nonuniit' : 'STIXNonUnicode:italic',
                 'nonunibf' : 'STIXNonUnicode:weight=bold',
                 0 : 'STIXGeneral',
                 1 : 'STIXSize1',
                 2 : 'STIXSize2',
                 3 : 'STIXSize3',
                 4 : 'STIXSize4',
                 5 : 'STIXSize5'
                 }
    fontmap = {}
    use_cmex = False
    cm_fallback = False
    _sans = False
    def __init__(self, *args, **kwargs):
        TruetypeFonts.__init__(self, *args, **kwargs)
        # fontmap is a class attribute, so resolve file paths only once.
        if not len(self.fontmap):
            for key, name in self._fontmap.iteritems():
                fullpath = findfont(name)
                self.fontmap[key] = fullpath
                self.fontmap[name] = fullpath
    def _map_virtual_font(self, fontname, font_class, uniindex):
        # Handle these "fonts" that are actually embedded in
        # other fonts.
        mapping = stix_virtual_fonts.get(fontname)
        if self._sans and mapping is None:
            mapping = stix_virtual_fonts['sf']
            doing_sans_conversion = True
        else:
            doing_sans_conversion = False
        if mapping is not None:
            if isinstance(mapping, dict):
                mapping = mapping[font_class]
            # Binary search for the source glyph
            lo = 0
            hi = len(mapping)
            while lo < hi:
                mid = (lo+hi)//2
                # NOTE: local `range` shadows the builtin inside this loop.
                range = mapping[mid]
                if uniindex < range[0]:
                    hi = mid
                elif uniindex <= range[1]:
                    break
                else:
                    lo = mid + 1
            if uniindex >= range[0] and uniindex <= range[1]:
                # Translate into the target font's code-point range.
                uniindex = uniindex - range[0] + range[3]
                fontname = range[2]
            elif not doing_sans_conversion:
                # This will generate a dummy character
                uniindex = 0x1
                fontname = 'it'
        # Handle private use area glyphs
        if (fontname in ('it', 'rm', 'bf') and
            uniindex >= 0xe000 and uniindex <= 0xf8ff):
            fontname = 'nonuni' + fontname
        return fontname, uniindex
    # Cache of sym -> list of (size index, char) alternatives.
    _size_alternatives = {}
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        alternatives = self._size_alternatives.get(sym)
        if alternatives:
            return alternatives
        alternatives = []
        try:
            uniindex = get_unicode_index(sym)
        except ValueError:
            return [(fontname, sym)]
        # The STIX size-alternative glyphs use angle brackets, not </>.
        fix_ups = {
            ord('<'): 0x27e8,
            ord('>'): 0x27e9 }
        uniindex = fix_ups.get(uniindex, uniindex)
        for i in range(6):
            cached_font = self._get_font(i)
            glyphindex = cached_font.charmap.get(uniindex)
            if glyphindex is not None:
                alternatives.append((i, unichr(uniindex)))
        self._size_alternatives[sym] = alternatives
        return alternatives
class StixSansFonts(StixFonts):
    """
    A font handling class for the STIX fonts (that uses sans-serif
    characters by default).
    """
    # Flips StixFonts._map_virtual_font into sans-serif substitution mode.
    _sans = True
class StandardPsFonts(Fonts):
    """
    Use the standard postscript fonts for rendering to backend_ps
    Unlike the other font classes, BakomaFont and UnicodeFont, this
    one requires the Ps backend.
    """
    basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
    # TeX font name -> AFM file basename (standard PostScript fonts).
    fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
                'rm' : 'pncr8a', # New Century Schoolbook
                'tt' : 'pcrr8a', # Courier
                'it' : 'pncri8a', # New Century Schoolbook Italic
                'sf' : 'phvr8a', # Helvetica
                'bf' : 'pncb8a', # New Century Schoolbook Bold
                None : 'psyr' # Symbol
                }
    def __init__(self, default_font_prop):
        """
        *default_font_prop*: FontProperties used to look up the default
        (non-math) AFM font.
        """
        Fonts.__init__(self, default_font_prop, MathtextBackendPs())
        # Cache of _get_info results keyed by (font, sym, size, dpi).
        self.glyphd = {}
        self.fonts = {}
        filename = findfont(default_font_prop, fontext='afm')
        # NOTE: file() is the Python-2 spelling of open().
        default_font = AFM(file(filename, 'r'))
        default_font.fname = filename
        self.fonts['default'] = default_font
        self.pswriter = StringIO()
    def _get_font(self, font):
        """Load (and cache) the AFM font for TeX font name or basename *font*."""
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            basename = font
        cached_font = self.fonts.get(basename)
        if cached_font is None:
            fname = os.path.join(self.basepath, basename + ".afm")
            cached_font = AFM(file(fname, 'r'))
            cached_font.fname = fname
            # Cache under both the AFM basename and the font's own name.
            self.fonts[basename] = cached_font
            self.fonts[cached_font.get_fontname()] = cached_font
        return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts are raised above the baseline
# (NOTE(review): original comment said "drop below the baseline", which
# duplicated the SUB1 comment -- superscript shifts go upward in TeX.)
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
    """Warning category used by this module's ``warn`` calls."""
    pass
class Node(object):
    """
    A node in the TeX box model.  Base class for boxes, characters,
    glue, kerns and rules.
    """

    def __init__(self):
        self.size = 0

    def __repr__(self):
        return self.__internal_repr__()

    def __internal_repr__(self):
        return type(self).__name__

    def get_kerning(self, next):
        """Kerning between this node and *next*; none by default."""
        return 0.0

    def shrink(self):
        """
        Shrinks one level smaller.  Subclasses stop scaling their
        dimensions once ``size`` reaches NUM_SIZE_LEVELS.
        """
        self.size += 1

    def grow(self):
        """
        Grows one level larger.  There is no limit to how big
        something can get.
        """
        self.size -= 1

    def render(self, x, y):
        # Base nodes have nothing to draw.
        pass
class Box(Node):
    """
    Any node with a physical extent: *width*, *height* and *depth*.
    """
    def __init__(self, width, height, depth):
        Node.__init__(self)
        self.width, self.height, self.depth = width, height, depth

    def shrink(self):
        Node.shrink(self)
        # Dimensions only scale down while within the size-level limit.
        if self.size < NUM_SIZE_LEVELS:
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR

    def grow(self):
        Node.grow(self)
        self.width *= GROW_FACTOR
        self.height *= GROW_FACTOR
        self.depth *= GROW_FACTOR

    def render(self, x1, y1, x2, y2):
        # Plain boxes draw nothing; see Rule for a box that does.
        pass
class Vbox(Box):
    """
    A box with vertical extent only (its width is zero).
    """
    def __init__(self, height, depth):
        Box.__init__(self, 0., height, depth)
class Hbox(Box):
    """
    A box with horizontal extent only (zero height and depth).
    """
    def __init__(self, width):
        Box.__init__(self, width, 0., 0.)
class Char(Node):
    """
    A single character.  Unlike TeX, the font information and metrics
    are stored on each :class:`Char` so they are easy to look up when
    needed.  TeX boxes carry width/height/depth, whereas Type1 and
    TrueType fonts use a full bounding box plus an x-advance; metrics
    are converted to the TeX convention here, and an advance differing
    from the width becomes a :class:`Kern` node when the
    :class:`Char` is added to its parent :class:`Hlist`.
    """
    def __init__(self, c, state):
        Node.__init__(self)
        self.c = c
        self.font_output = state.font_output
        assert isinstance(state.font, (str, unicode, int))
        self.font = state.font
        self.font_class = state.font_class
        self.fontsize = state.fontsize
        self.dpi = state.dpi
        # Real dimensions are (re)computed during the pack phase, once
        # the final fontsize is known.
        self._update_metrics()

    def __internal_repr__(self):
        return '`%s`' % self.c

    def _update_metrics(self):
        self._metrics = metrics = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        # A space has no ink, so its advance stands in for the width.
        if self.c == ' ':
            self.width = metrics.advance
        else:
            self.width = metrics.width
        self.height = metrics.iceberg
        self.depth = -(metrics.iceberg - metrics.height)

    def is_slanted(self):
        return self._metrics.slanted

    def get_kerning(self, next):
        """
        Return the kerning between this character and *next*, plus the
        difference between the glyph's advance and its width.  Used
        when an :class:`Hlist` inserts :class:`Kern` nodes.
        """
        advance = self._metrics.advance - self.width
        if isinstance(next, Char):
            return advance + self.font_output.get_kern(
                self.font, self.font_class, self.c, self.fontsize,
                next.font, next.font_class, next.c, next.fontsize,
                self.dpi)
        return advance

    def render(self, x, y):
        """
        Render the character to the canvas
        """
        self.font_output.render_glyph(
            x, y,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)

    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.fontsize *= SHRINK_FACTOR
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR

    def grow(self):
        Node.grow(self)
        self.fontsize *= GROW_FACTOR
        self.width *= GROW_FACTOR
        self.height *= GROW_FACTOR
        self.depth *= GROW_FACTOR
class Accent(Char):
    """
    An accent character.  TrueType fonts already position accents
    relative to the baseline, so the metrics are taken from the glyph's
    bounding box rather than the advance/iceberg values.
    """
    def _update_metrics(self):
        self._metrics = m = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        self.width = m.xmax - m.xmin
        self.height = m.ymax - m.ymin
        self.depth = 0

    def shrink(self):
        Char.shrink(self)
        self._update_metrics()

    def grow(self):
        Char.grow(self)
        self._update_metrics()

    def render(self, x, y):
        """
        Render the character, shifted so its bounding box sits at (x, y).
        """
        self.font_output.render_glyph(
            x - self._metrics.xmin, y + self._metrics.ymin,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
    """
    A list of child nodes, either horizontal or vertical.
    """
    def __init__(self, elements):
        Box.__init__(self, 0., 0., 0.)
        self.shift_amount = 0.   # An arbitrary offset
        self.children = elements # The child nodes of this list
        # The following are computed by the vpack and hpack functions:
        self.glue_set = 0.       # The glue setting of this list
        self.glue_sign = 0       # 0: normal, -1: shrinking, 1: stretching
        self.glue_order = 0      # The order of infinity (0 - 3) for the glue

    def __repr__(self):
        rendered_children = ' '.join(repr(child) for child in self.children)
        return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
            self.__internal_repr__(),
            self.width, self.height,
            self.depth, self.shift_amount,
            rendered_children)

    def _determine_order(self, totals):
        """
        Return the highest order of infinity (index into *totals*) with
        a non-zero total, or 0 if all entries are zero.  Used by vpack
        and hpack.
        """
        for i in range(len(totals) - 1, 0, -1):
            if totals[i] != 0.0:
                return i
        return 0

    def _set_glue(self, x, sign, totals, error_type):
        order = self._determine_order(totals)
        self.glue_order = order
        self.glue_sign = sign
        if totals[order] != 0.:
            self.glue_set = x / totals[order]
        else:
            self.glue_sign = 0
            # NOTE(review): this assigns glue_ratio while consumers read
            # glue_set; preserved verbatim -- confirm intent upstream.
            self.glue_ratio = 0.
        if order == 0:
            # Finite glue could not absorb the discrepancy.
            if len(self.children):
                warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
                     MathTextWarning)

    def shrink(self):
        for child in self.children:
            child.shrink()
        Box.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.shift_amount *= SHRINK_FACTOR
            self.glue_set *= SHRINK_FACTOR

    def grow(self):
        for child in self.children:
            child.grow()
        Box.grow(self)
        self.shift_amount *= GROW_FACTOR
        self.glue_set *= GROW_FACTOR
class Hlist(List):
    """
    A horizontal list of boxes.
    """
    def __init__(self, elements, w=0., m='additional', do_kern=True):
        # NOTE(review): *w* and *m* are accepted but not forwarded to
        # hpack() below -- hpack always runs with its defaults.
        List.__init__(self, elements)
        if do_kern:
            self.kern()
        self.hpack()
    def kern(self):
        """
        Insert :class:`Kern` nodes between :class:`Char` nodes to set
        kerning. The :class:`Char` nodes themselves determine the
        amount of kerning they need (in :meth:`~Char.get_kerning`),
        and this function just creates the linked list in the correct
        way.
        """
        new_children = []
        num_children = len(self.children)
        if num_children:
            for i in range(num_children):
                elem = self.children[i]
                # Each element is kerned against its successor; the last
                # one is paired with None.
                if i < num_children - 1:
                    next = self.children[i + 1]
                else:
                    next = None
                new_children.append(elem)
                kerning_distance = elem.get_kerning(next)
                if kerning_distance != 0.:
                    kern = Kern(kerning_distance)
                    new_children.append(kern)
            self.children = new_children
    # This is a failed experiment to fake cross-font kerning.
#     def get_kerning(self, next):
#         if len(self.children) >= 2 and isinstance(self.children[-2], Char):
#             if isinstance(next, Char):
#                 print "CASE A"
#                 return self.children[-2].get_kerning(next)
#             elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
#                 print "CASE B"
#                 result = self.children[-2].get_kerning(next.children[0])
#                 print result
#                 return result
#         return 0.0
    def hpack(self, w=0., m='additional'):
        """
        The main duty of :meth:`hpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified.  The computed sizes normally
        enclose all of the material inside the new box; but some items
        may stick out if negative glue is used, if the box is
        overfull, or if a ``\\vbox`` includes other boxes that have
        been shifted left.
        - *w*: specifies a width
        - *m*: is either 'exactly' or 'additional'.
        Thus, ``hpack(w, 'exactly')`` produces a box whose width is
        exactly *w*, while ``hpack(w, 'additional')`` yields a box
        whose width is the natural width plus *w*.  The default values
        produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        #self.shift_amount = 0.
        h = 0.
        d = 0.
        x = 0.
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        # First pass: accumulate natural width *x*, maximum height *h* and
        # depth *d*, and per-order totals of stretch/shrink from glue.
        for p in self.children:
            if isinstance(p, Char):
                x += p.width
                h = max(h, p.height)
                d = max(d, p.depth)
            elif isinstance(p, Box):
                x += p.width
                if not isinf(p.height) and not isinf(p.depth):
                    # Shifted boxes contribute shifted extents.
                    s = getattr(p, 'shift_amount', 0.)
                    h = max(h, p.height - s)
                    d = max(d, p.depth + s)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += p.width
        self.height = h
        self.depth = d
        if m == 'additional':
            w += x
        self.width = w
        # x becomes the excess (stretch) or deficit (shrink) to absorb.
        x = w - x
        if x == 0.:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Vlist(List):
    """
    A vertical list of boxes.
    """
    def __init__(self, elements, h=0., m='additional'):
        # NOTE(review): *h* and *m* are accepted but not forwarded to
        # vpack() below -- vpack always runs with its defaults.
        List.__init__(self, elements)
        self.vpack()
    def vpack(self, h=0., m='additional', l=float(inf)):
        """
        The main duty of :meth:`vpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified.
        - *h*: specifies a height
        - *m*: is either 'exactly' or 'additional'.
        - *l*: a maximum height
        Thus, ``vpack(h, 'exactly')`` produces a box whose height is
        exactly *h*, while ``vpack(h, 'additional')`` yields a box
        whose height is the natural height plus *h*.  The default
        values produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        w = 0.
        d = 0.
        x = 0.
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        # First pass: accumulate natural height *x*, pending depth *d*,
        # maximum width *w*, and per-order stretch/shrink totals.
        for p in self.children:
            if isinstance(p, Box):
                # The previous box's depth becomes inter-box distance.
                x += d + p.height
                d = p.depth
                if not isinf(p.width):
                    s = getattr(p, 'shift_amount', 0.)
                    w = max(w, p.width + s)
            elif isinstance(p, Glue):
                x += d
                d = 0.
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += d + p.width
                d = 0.
            elif isinstance(p, Char):
                raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
        self.width = w
        # Cap the final depth at *l*; any excess depth becomes height.
        if d > l:
            x += d - l
            self.depth = l
        else:
            self.depth = d
        if m == 'additional':
            h += x
        self.height = h
        # x becomes the excess (stretch) or deficit (shrink) to absorb.
        x = h - x
        if x == 0:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
    """
    A solid black rectangle.  It has *width*, *depth* and *height*
    fields just like an :class:`Hlist`.  A dimension of inf is
    "running": the rule extends to the boundary of the innermost
    enclosing box.  The width is never running in an :class:`Hlist`;
    the height and depth are never running in a :class:`Vlist`.
    """
    def __init__(self, width, height, depth, state):
        Box.__init__(self, width, height, depth)
        self.font_output = state.font_output

    def render(self, x, y, w, h):
        # Delegate the actual fill to the font backend.
        self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
    """
    A horizontal rule; its thickness comes from the font's underline
    thickness, split evenly between height and depth.
    """
    def __init__(self, state):
        thickness = state.font_output.get_underline_thickness(
            state.font, state.fontsize, state.dpi)
        half = thickness * 0.5
        Rule.__init__(self, inf, half, half, state)
class Vrule(Rule):
    """
    A vertical rule; its width is the font's underline thickness, and
    its height and depth are running (inf).
    """
    def __init__(self, state):
        thickness = state.font_output.get_underline_thickness(
            state.font, state.fontsize, state.dpi)
        Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
    """
    Stretchable/shrinkable space.  Most of the information lives in the
    underlying :class:`GlueSpec` class, which may be shared between
    multiple glue objects.  (This is a memory optimization which
    probably doesn't matter anymore, but it's easier to stick to what
    TeX does.)

    *glue_type* is either the name of a predefined spec (see
    ``GlueSpec._types``) or a :class:`GlueSpec` instance; anything else
    raises ValueError.  If *copy* is True, a private copy of the spec
    is used.
    """
    def __init__(self, glue_type, copy=False):
        Node.__init__(self)
        self.glue_subtype = 'normal'
        if is_string_like(glue_type):
            glue_spec = GlueSpec.factory(glue_type)
        elif isinstance(glue_type, GlueSpec):
            glue_spec = glue_type
        else:
            # BUG FIX: previously raised ArgumentError, which is not
            # defined anywhere and would itself have raised NameError.
            raise ValueError("glue_type must be a glue spec name or instance.")
        if copy:
            glue_spec = glue_spec.copy()
        self.glue_spec = glue_spec
    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            if self.glue_spec.width != 0.:
                # Copy-on-write: never mutate a (possibly shared) spec.
                self.glue_spec = self.glue_spec.copy()
                self.glue_spec.width *= SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        if self.glue_spec.width != 0.:
            # Copy-on-write: never mutate a (possibly shared) spec.
            self.glue_spec = self.glue_spec.copy()
            self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
    """
    The shared specification behind :class:`Glue`: a natural width plus
    stretch/shrink amounts and their orders of infinity.
    """
    def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
        self.width = width
        self.stretch = stretch
        self.stretch_order = stretch_order
        self.shrink = shrink
        self.shrink_order = shrink_order

    def copy(self):
        """Return an independent copy of this spec."""
        return GlueSpec(self.width,
                        self.stretch, self.stretch_order,
                        self.shrink, self.shrink_order)

    @classmethod
    def factory(cls, glue_type):
        """Look up a predefined spec by name (see ``GlueSpec._types``)."""
        return cls._types[glue_type]
# Predefined named glue specs, mirroring TeX's fil/fill/filll glue, their
# negative counterparts, and the stretchy-shrinky 'ss' glue used for
# centering.  Argument order: (width, stretch, stretch_order, shrink,
# shrink_order).
GlueSpec._types = {
    'fil': GlueSpec(0., 1., 1, 0., 0),
    'fill': GlueSpec(0., 1., 2, 0., 0),
    'filll': GlueSpec(0., 1., 3, 0., 0),
    'neg_fil': GlueSpec(0., 0., 0, 1., 1),
    'neg_fill': GlueSpec(0., 0., 0, 1., 2),
    'neg_filll': GlueSpec(0., 0., 0, 1., 3),
    'empty': GlueSpec(0., 0., 0, 0., 0),
    'ss': GlueSpec(0., 1., 1, -1., 1)
    }
# Some convenient ways to get common kinds of glue
# (each class below is a trivial shorthand for Glue('<name>')).
class Fil(Glue):
    def __init__(self):
        Glue.__init__(self, 'fil')
class Fill(Glue):
    def __init__(self):
        Glue.__init__(self, 'fill')
class Filll(Glue):
    def __init__(self):
        Glue.__init__(self, 'filll')
class NegFil(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
    def __init__(self):
        Glue.__init__(self, 'ss')
class HCentered(Hlist):
    """
    An :class:`Hlist` whose contents are centered within the enclosing
    box by flanking them with stretchy-shrinky ('ss') glue.
    """
    def __init__(self, elements):
        padded = [SsGlue()] + elements + [SsGlue()]
        Hlist.__init__(self, padded, do_kern=False)
class VCentered(Vlist):
    """
    A convenience class to create a :class:`Vlist` whose contents are
    centered within its enclosing box.
    """
    # BUG FIX: this class previously subclassed Hlist while its docstring
    # and __init__ both build a Vlist; with the Hlist base, the
    # isinstance(p, Hlist) dispatch in Ship would route a VCentered to
    # hlist_out instead of vlist_out.
    def __init__(self, elements):
        Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
    """
    A (usually negative) amount of horizontal spacing, used between
    letters such as A and V where the font designer wants them moved
    closer together or further apart.  In a vertical list, *width*
    instead denotes additional vertical spacing.
    """
    def __init__(self, width):
        Node.__init__(self)
        self.width = width

    def __repr__(self):
        return "k%.02f" % self.width

    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.width *= SHRINK_FACTOR

    def grow(self):
        Node.grow(self)
        self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
    """
    A workaround for the fact that this code does not do a two-pass
    parse like TeX: the nucleus, sub- and super-script are remembered
    on this (initially empty) hlist so that, if another script follows
    and must be attached, the cluster can be reconfigured on the fly.
    """
    def __init__(self):
        self.nucleus = None
        self.sub = None
        self.super = None
        Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
    """
    Create a character as close as possible to the given *height* and
    *depth*.  With fonts that provide multiple height variants of some
    characters (such as BaKoMa), the best variant is chosen; the result
    is then scaled so its total extent matches exactly.
    """
    def __init__(self, c, height, depth, state, always=False):
        alternatives = state.font_output.get_sized_alternatives_for_symbol(
            state.font, c)
        state = state.copy()
        target_total = height + depth
        # Pick the first variant at least as tall as the target.
        for fontname, sym in alternatives:
            state.font = fontname
            char = Char(sym, state)
            if char.height + char.depth >= target_total:
                break
        # Rescale the chosen variant to the exact target extent.
        state.fontsize *= target_total / (char.height + char.depth)
        char = Char(sym, state)
        Hlist.__init__(self, [char])
        self.shift_amount = depth - char.depth
class AutoWidthChar(Hlist):
    """
    Create a character as close as possible to the given *width*.  With
    fonts that provide multiple width variants of some characters (such
    as BaKoMa), the best variant is chosen; the result is then scaled
    to the exact width.
    """
    def __init__(self, c, width, state, always=False, char_class=Char):
        alternatives = state.font_output.get_sized_alternatives_for_symbol(
            state.font, c)
        state = state.copy()
        # Pick the first variant at least as wide as the target.
        for fontname, sym in alternatives:
            state.font = fontname
            char = char_class(sym, state)
            if char.width >= width:
                break
        # Rescale the chosen variant to the exact target width.
        state.fontsize *= width / char.width
        char = char_class(sym, state)
        Hlist.__init__(self, [char])
        self.width = char.width
class Ship(object):
    """
    Once the boxes have been set up, this sends them to output.  Since
    boxes can be inside of boxes inside of boxes, the main work of
    :class:`Ship` is done by two mutually recursive routines,
    :meth:`hlist_out` and :meth:`vlist_out`, which traverse the
    :class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
    and vertical boxes.  The global variables used in TeX to store
    state as it processes have become member variables here.
    """
    def __call__(self, ox, oy, box):
        """Render *box* and everything inside it at offset (*ox*, *oy*)."""
        self.max_push = 0 # Deepest nesting of push commands so far
        self.cur_s = 0
        self.cur_v = 0.
        self.cur_h = 0.
        self.off_h = ox
        self.off_v = oy + box.height
        self.hlist_out(box)

    @staticmethod
    def clamp(value):
        # Keep coordinates within TeX-like bounds to avoid overflow.
        if value < -1000000000.:
            return -1000000000.
        if value > 1000000000.:
            return 1000000000.
        return value

    def hlist_out(self, box):
        """Ship out an :class:`Hlist` (cf. TeX's hlist_out, node619ff)."""
        cur_g = 0
        cur_glue = 0.
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        base_line = self.cur_v
        left_edge = self.cur_h
        self.cur_s += 1
        self.max_push = max(self.cur_s, self.max_push)
        clamp = self.clamp
        for p in box.children:
            if isinstance(p, Char):
                p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
                self.cur_h += p.width
            elif isinstance(p, Kern):
                self.cur_h += p.width
            elif isinstance(p, List):
                # node623
                if len(p.children) == 0:
                    self.cur_h += p.width
                else:
                    edge = self.cur_h
                    self.cur_v = base_line + p.shift_amount
                    if isinstance(p, Hlist):
                        self.hlist_out(p)
                    else:
                        # p.vpack(box.height + box.depth, 'exactly')
                        self.vlist_out(p)
                    self.cur_h = edge + p.width
                    self.cur_v = base_line
            elif isinstance(p, Box):
                # node624 (rules)
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                if isinf(rule_height):
                    rule_height = box.height
                if isinf(rule_depth):
                    rule_depth = box.depth
                if rule_height > 0 and rule_width > 0:
                    # BUG FIX: both assignments below previously
                    # referenced an undefined name ``baseline`` (the
                    # local is spelled ``base_line``), raising NameError
                    # whenever a rule was rendered inside an hlist.
                    self.cur_v = base_line + rule_depth
                    p.render(self.cur_h + self.off_h,
                             self.cur_v + self.off_v,
                             rule_width, rule_height)
                    self.cur_v = base_line
                self.cur_h += rule_width
            elif isinstance(p, Glue):
                # node625: set the glue according to the box's glue_set.
                glue_spec = p.glue_spec
                rule_width = glue_spec.width - cur_g
                if glue_sign != 0: # normal
                    if glue_sign == 1: # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(float(box.glue_set) * cur_glue))
                    elif glue_spec.shrink_order == glue_order: # shrinking
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(float(box.glue_set) * cur_glue))
                    rule_width += cur_g
                self.cur_h += rule_width
        self.cur_s -= 1

    def vlist_out(self, box):
        """Ship out a :class:`Vlist` (cf. TeX's vlist_out, node629ff)."""
        cur_g = 0
        cur_glue = 0.
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        self.cur_s += 1
        self.max_push = max(self.max_push, self.cur_s)
        left_edge = self.cur_h
        self.cur_v -= box.height
        top_edge = self.cur_v
        clamp = self.clamp
        for p in box.children:
            if isinstance(p, Kern):
                self.cur_v += p.width
            elif isinstance(p, List):
                if len(p.children) == 0:
                    self.cur_v += p.height + p.depth
                else:
                    self.cur_v += p.height
                    self.cur_h = left_edge + p.shift_amount
                    save_v = self.cur_v
                    p.width = box.width
                    if isinstance(p, Hlist):
                        self.hlist_out(p)
                    else:
                        self.vlist_out(p)
                    self.cur_v = save_v + p.depth
                    self.cur_h = left_edge
            elif isinstance(p, Box):
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                if isinf(rule_width):
                    rule_width = box.width
                rule_height += rule_depth
                # NOTE(review): TeX's vlist_out (node633) guards on
                # rule_width > 0 here; this guard on rule_depth looks
                # suspicious but is preserved as-is -- confirm upstream.
                if rule_height > 0 and rule_depth > 0:
                    self.cur_v += rule_height
                    p.render(self.cur_h + self.off_h,
                             self.cur_v + self.off_v,
                             rule_width, rule_height)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                rule_height = glue_spec.width - cur_g
                if glue_sign != 0: # normal
                    if glue_sign == 1: # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(float(box.glue_set) * cur_glue))
                    elif glue_spec.shrink_order == glue_order: # shrinking
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(float(box.glue_set) * cur_glue))
                    rule_height += cur_g
                self.cur_v += rule_height
            elif isinstance(p, Char):
                raise RuntimeError("Internal mathtext error: Char node found in vlist")
        self.cur_s -= 1
ship = Ship()
##############################################################################
# PARSER
def Error(msg):
    """
    Build a parser element that, when matched, raises
    :exc:`ParseFatalException` carrying *msg* and the input string.
    (Despite the capitalized name, this is a factory function.)
    """
    def raise_error(s, loc, toks):
        raise ParseFatalException(msg + "\n" + s)
    return Empty().setParseAction(raise_error)
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
    def __init__(self):
        """
        Build the pyparsing grammar.  Mutually recursive elements are
        declared as ``Forward()`` placeholders first and defined with
        ``<<`` further down; each element's parse action is one of this
        class's methods.
        """
        # All forward declarations are here
        font = Forward().setParseAction(self.font).setName("font")
        latexfont = Forward()
        subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
        placeable = Forward().setName("placeable")
        simple = Forward().setName("simple")
        autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
        self._expression = Forward().setParseAction(self.finish).setName("finish")
        # NOTE(review): shadows the builtin 'float' within this method.
        float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
        lbrace = Literal('{').suppress()
        rbrace = Literal('}').suppress()
        start_group = (Optional(latexfont) - lbrace)
        start_group.setParseAction(self.start_group)
        end_group = rbrace.copy()
        end_group.setParseAction(self.end_group)
        bslash = Literal('\\')
        # Python 2 idiom: dict.keys() returns a list, so + concatenates.
        accent = oneOf(self._accent_map.keys() +
                       list(self._wide_accents))
        function = oneOf(list(self._function_names))
        fontname = oneOf(list(self._fontnames))
        latex2efont = oneOf(['math' + x for x in self._fontnames])
        # Named spacing commands such as \quad and \! .
        space =(FollowedBy(bslash)
              + oneOf([r'\ ',
                       r'\/',
                       r'\,',
                       r'\;',
                       r'\quad',
                       r'\qquad',
                       r'\!'])
              ).setParseAction(self.space).setName('space')
        customspace =(Literal(r'\hspace')
                    - (( lbrace
                       - float
                       - rbrace
                      ) | Error(r"Expected \hspace{n}"))
                   ).setParseAction(self.customspace).setName('customspace')
        # NOTE(review): UR"..." raw-unicode literals are Python 2 only.
        unicode_range = u"\U00000080-\U0001ffff"
        symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
               | (Combine(
                   bslash
                 + oneOf(tex2uni.keys())
                 ) + FollowedBy(Regex("[^a-zA-Z]")))
               ).setParseAction(self.symbol).leaveWhitespace()
        c_over_c =(Suppress(bslash)
                 + oneOf(self._char_over_chars.keys())
                 ).setParseAction(self.char_over_chars)
        accent = Group(
                     Suppress(bslash)
                   + accent
                   - placeable
                 ).setParseAction(self.accent).setName("accent")
        function =(Suppress(bslash)
                 + function
                 ).setParseAction(self.function).setName("function")
        group = Group(
                    start_group
                  + ZeroOrMore(
                        autoDelim
                      ^ simple)
                  - end_group
                ).setParseAction(self.group).setName("group")
        font <<(Suppress(bslash)
              + fontname)
        latexfont <<(Suppress(bslash)
                   + latex2efont)
        frac = Group(
                   Suppress(Literal(r"\frac"))
                 + ((group + group)
                    | Error(r"Expected \frac{num}{den}"))
               ).setParseAction(self.frac).setName("frac")
        sqrt = Group(
                   Suppress(Literal(r"\sqrt"))
                 + Optional(
                       Suppress(Literal("["))
                     - Regex("[0-9]+")
                     - Suppress(Literal("]")),
                     default = None
                   )
                 + (group | Error("Expected \sqrt{value}"))
               ).setParseAction(self.sqrt).setName("sqrt")
        # Anything that can appear where a single "atom" is expected.
        placeable <<(accent
                   ^ function
                   ^ (c_over_c | symbol)
                   ^ group
                   ^ frac
                   ^ sqrt
                 )
        simple <<(space
                | customspace
                | font
                | subsuper
                )
        subsuperop = oneOf(["_", "^"])
        subsuper << Group(
            ( Optional(placeable)
            + OneOrMore(
                subsuperop
              - placeable
              )
            )
          | placeable
        )
        ambiDelim = oneOf(list(self._ambiDelim))
        leftDelim = oneOf(list(self._leftDelim))
        rightDelim = oneOf(list(self._rightDelim))
        # \left<delim> ... \right<delim>, possibly nested.
        autoDelim <<(Suppress(Literal(r"\left"))
                   + ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
                   + Group(
                       autoDelim
                     ^ OneOrMore(simple))
                   + Suppress(Literal(r"\right"))
                   + ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
                 )
        math = OneOrMore(
                 autoDelim
               ^ simple
               ).setParseAction(self.math).setName("math")
        # A '$' not preceded by a backslash toggles math mode.
        math_delim = ~bslash + Literal('$')
        non_math = Regex(r"(?:(?:\\[$])|[^$])*"
                   ).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
        # Full input: alternating non-math and $-delimited math sections.
        self._expression << (
            non_math
          + ZeroOrMore(
                Suppress(math_delim)
              + Optional(math)
              + (Suppress(math_delim)
                 | Error("Expected end of math '$'"))
              + non_math
            )
          ) + StringEnd()
        self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
    def parse(self, s, fonts_object, fontsize, dpi):
        """
        Parse expression *s* using the given *fonts_object* for
        output, at the given *fontsize* and *dpi*.
        Returns the parse tree of :class:`Node` instances.
        Raises ValueError (with a caret marking the error column) on a
        parse failure.
        """
        # Fresh stack with one default state; parse actions push and pop
        # as groups and math sections are entered and left.
        # NOTE: 'except X, e' below is Python 2-only exception syntax.
        self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
        try:
            self._expression.parseString(s)
        except ParseException, err:
            raise ValueError("\n".join([
                        "",
                        err.line,
                        " " * (err.column - 1) + "^",
                        str(err)]))
        return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
    """
    Stores the state of the parser.

    States are pushed and popped from a stack as necessary, and
    the "current" state is always at the top of the stack.
    """
    def __init__(self, font_output, font, font_class, fontsize, dpi):
        # font_output is the font backend used to produce glyphs;
        # it is shared (not copied) between states.
        self.font_output = font_output
        self._font = font
        self.font_class = font_class
        self.fontsize = fontsize
        self.dpi = dpi

    def copy(self):
        # Shallow copy; font_output is intentionally shared.
        return Parser.State(
            self.font_output,
            self.font,
            self.font_class,
            self.fontsize,
            self.dpi)

    def _get_font(self):
        return self._font

    def _set_font(self, name):
        # Selecting one of the basic families also records it as the
        # current font class.
        if name in ('it', 'rm', 'bf'):
            self.font_class = name
        self._font = name

    # 'font' is exposed as a property so assignments keep font_class
    # in sync via _set_font.
    font = property(_get_font, _set_font)
def get_state(self):
    """
    Get the current :class:`State` of the parser.
    """
    return self._state_stack[-1]

def pop_state(self):
    """
    Pop a :class:`State` off of the stack.
    """
    self._state_stack.pop()

def push_state(self):
    """
    Push a new :class:`State` onto the stack which is just a copy
    of the current state.
    """
    self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
    """Parse action for the complete expression: wrap everything in a
    single horizontal list and remember it as the parse result."""
    #~ print "finish", toks
    self._expr = Hlist(toks)
    return [self._expr]

def math(self, s, loc, toks):
    """Parse action for a math region: box its tokens and pop the
    state that non_math() pushed when entering math mode."""
    #~ print "math", toks
    hlist = Hlist(toks)
    self.pop_state()
    return [hlist]

def non_math(self, s, loc, toks):
    """Parse action for text outside $...$: render it verbatim
    (un-escaping \\$) and prepare state for the math that follows."""
    #~ print "non_math", toks
    s = toks[0].replace(r'\$', '$')
    symbols = [Char(c, self.get_state()) for c in s]
    hlist = Hlist(symbols)
    # We're going into math now, so set font to 'it'
    self.push_state()
    self.get_state().font = 'it'
    return [hlist]
def _make_space(self, percentage):
    """Return a Kern whose width is *percentage* of the current em.

    The em width depends only on (font, fontsize, dpi), so it is
    computed once per combination and memoized in
    ``self._em_width_cache``.
    """
    # All spaces are relative to em width
    state = self.get_state()
    key = (state.font, state.fontsize, state.dpi)
    if key not in self._em_width_cache:
        metrics = state.font_output.get_metrics(
            state.font, 'it', 'm', state.fontsize, state.dpi)
        self._em_width_cache[key] = metrics.advance
    return Kern(self._em_width_cache[key] * percentage)
# Widths of the fixed spacing commands, expressed in ems.
_space_widths = { r'\ '      : 0.3,
                  r'\,'      : 0.4,
                  r'\;'      : 0.8,
                  r'\quad'   : 1.6,
                  r'\qquad'  : 3.2,
                  r'\!'      : -0.4,
                  r'\/'      : 0.4 }

def space(self, s, loc, toks):
    """Parse action for one of the fixed spacing commands above:
    return the corresponding Kern box."""
    assert(len(toks)==1)
    num = self._space_widths[toks[0]]
    box = self._make_space(num)
    return [box]
def customspace(self, s, loc, toks):
    """Parse action for a user-specified space; toks[1] is the
    requested width in ems."""
    return [self._make_space(float(toks[1]))]

def symbol(self, s, loc, toks):
    """Parse action for a single symbol/character.

    Spaced symbols (e.g. binary/relational operators) get 0.2 em on
    both sides; punctuation gets 0.2 em after only.  Raises
    ParseFatalException for glyphs the current fonts cannot supply.
    """
    # print "symbol", toks
    c = toks[0]
    try:
        char = Char(c, self.get_state())
    except ValueError:
        raise ParseFatalException("Unknown symbol: %s" % c)
    if c in self._spaced_symbols:
        return [Hlist( [self._make_space(0.2),
                        char,
                        self._make_space(0.2)] ,
                       do_kern = False)]
    elif c in self._punctuation_symbols:
        return [Hlist( [char,
                        self._make_space(0.2)] ,
                       do_kern = False)]
    return [char]
_char_over_chars = {
    # The first 2 entries in the tuple are (font, char, sizescale) for
    # the two symbols under and over.  The third element is the space
    # (in multiples of underline height)
    r'AA' : (  ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}

def char_over_chars(self, s, loc, toks):
    """Parse action for composite symbols such as \\AA: stack one
    glyph (possibly scaled) over another, centered to a common width."""
    sym = toks[0]
    state = self.get_state()
    thickness = state.font_output.get_underline_thickness(
        state.font, state.fontsize, state.dpi)

    under_desc, over_desc, space = \
        self._char_over_chars.get(sym, (None, None, 0.0))
    if under_desc is None:
        raise ParseFatalException("Error parsing symbol")

    # Build the over glyph in a (possibly rescaled) copy of the state.
    over_state = state.copy()
    if over_desc[0] is not None:
        over_state.font = over_desc[0]
    over_state.fontsize *= over_desc[2]
    over = Accent(over_desc[1], over_state)

    # Same for the under glyph.
    under_state = state.copy()
    if under_desc[0] is not None:
        under_state.font = under_desc[0]
    under_state.fontsize *= under_desc[2]
    under = Char(under_desc[1], under_state)

    # Center both pieces to the width of the wider one.
    width = max(over.width, under.width)
    over_centered = HCentered([over])
    over_centered.hpack(width, 'exactly')
    under_centered = HCentered([under])
    under_centered.hpack(width, 'exactly')

    return Vlist([
            over_centered,
            Vbox(0., thickness * space),
            under_centered
            ])
# Maps accent command names to the combining-character glyph used to
# draw them.
_accent_map = {
    r'hat'   : r'\circumflexaccent',
    r'breve' : r'\combiningbreve',
    r'bar'   : r'\combiningoverline',
    r'grave' : r'\combininggraveaccent',
    r'acute' : r'\combiningacuteaccent',
    r'ddot'  : r'\combiningdiaeresis',
    r'tilde' : r'\combiningtilde',
    r'dot'   : r'\combiningdotabove',
    r'vec'   : r'\combiningrightarrowabove',
    r'"'     : r'\combiningdiaeresis',
    r"`"     : r'\combininggraveaccent',
    r"'"     : r'\combiningacuteaccent',
    r'~'     : r'\combiningtilde',
    r'.'     : r'\combiningdotabove',
    r'^'     : r'\circumflexaccent'
}

# Accents that stretch horizontally to cover the accented symbol.
_wide_accents = set(r"widehat widetilde".split())

def accent(self, s, loc, toks):
    """Parse action for an accented symbol: center the accent glyph
    over the symbol, separated by twice the underline thickness."""
    assert(len(toks)==1)
    state = self.get_state()
    thickness = state.font_output.get_underline_thickness(
        state.font, state.fontsize, state.dpi)
    if len(toks[0]) != 2:
        raise ParseFatalException("Error parsing accent")
    accent, sym = toks[0]
    if accent in self._wide_accents:
        # Wide accents are built at the width of the accented symbol.
        accent = AutoWidthChar(
            '\\' + accent, sym.width, state, char_class=Accent)
    else:
        accent = Accent(self._accent_map[accent], state)
    centered = HCentered([accent])
    centered.hpack(sym.width, 'exactly')
    return Vlist([
            centered,
            Vbox(0., thickness * 2.0),
            Hlist([sym])
            ])
def function(self, s, loc, toks):
    """Parse action for a named function (e.g. \\sin): typeset its
    name in upright roman regardless of the surrounding font, and tag
    the resulting Hlist with ``function_name`` for later lookups."""
    #~ print "function", toks
    self.push_state()
    state = self.get_state()
    state.font = 'rm'
    hlist = Hlist([Char(c, state) for c in toks[0]])
    self.pop_state()
    hlist.function_name = toks[0]
    return hlist

def start_group(self, s, loc, toks):
    """Parse action for '{': push a new state.  An optional
    LaTeX-style font token (e.g. '\\rm ') applies inside the group;
    toks[0][4:] strips the leading backslash-name prefix."""
    self.push_state()
    # Deal with LaTeX-style font tokens
    if len(toks):
        self.get_state().font = toks[0][4:]
    return []

def group(self, s, loc, toks):
    """Parse action for a completed { ... } group: box its content."""
    grp = Hlist(toks[0])
    return [grp]

def end_group(self, s, loc, toks):
    """Parse action for '}': restore the state saved by start_group."""
    self.pop_state()
    return []

def font(self, s, loc, toks):
    """Parse action for a font-changing command: switch the current
    state's font in place (affects subsequent symbols only)."""
    assert(len(toks)==1)
    name = toks[0]
    self.get_state().font = name
    return []
def is_overunder(self, nucleus):
    """Return True if sub/superscripts on *nucleus* should be stacked
    above/below it (e.g. big operators or named functions listed in
    the class-level over/under tables) rather than placed beside it."""
    if isinstance(nucleus, Char):
        return nucleus.c in self._overunder_symbols
    elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
        return nucleus.function_name in self._overunder_functions
    return False

def is_dropsub(self, nucleus):
    """Return True if *nucleus* is in the drop-subscript symbol set,
    in which case subscripts are shifted further down (see
    subsuperscript())."""
    if isinstance(nucleus, Char):
        return nucleus.c in self._dropsub_symbols
    return False

def is_slanted(self, nucleus):
    """Return True if *nucleus* is a slanted glyph; superscripts then
    get a small extra horizontal shift in subsuperscript()."""
    if isinstance(nucleus, Char):
        return nucleus.is_slanted()
    return False
def subsuperscript(self, s, loc, toks):
    """Parse action for a sub/superscript group.

    toks[0] holds 1, 2, 3 or 5 elements: a bare placeable, a script
    with no nucleus, a nucleus with one script, or a nucleus with
    both scripts.  Returns a single-element list containing the
    positioned Hlist.  The shift arithmetic below mirrors TeX's
    scripting algorithm (the 'node757' comment refers to the
    corresponding section of the TeX program).
    """
    assert(len(toks)==1)
    # print 'subsuperscript', toks

    nucleus = None
    sub = None
    super = None

    if len(toks[0]) == 1:
        # Bare placeable with no scripts: pass through unchanged.
        return toks[0].asList()
    elif len(toks[0]) == 2:
        # Script with no nucleus (e.g. "^2"): attach to an empty box.
        op, next = toks[0]
        nucleus = Hbox(0.0)
        if op == '_':
            sub = next
        else:
            super = next
    elif len(toks[0]) == 3:
        # Nucleus plus exactly one script.
        nucleus, op, next = toks[0]
        if op == '_':
            sub = next
        else:
            super = next
    elif len(toks[0]) == 5:
        # Nucleus plus both scripts, in either order.
        nucleus, op1, next1, op2, next2 = toks[0]
        if op1 == op2:
            if op1 == '_':
                raise ParseFatalException("Double subscript")
            else:
                raise ParseFatalException("Double superscript")
        if op1 == '_':
            sub = next1
            super = next2
        else:
            super = next1
            sub = next2
    else:
        raise ParseFatalException(
            "Subscript/superscript sequence is too long. "
            "Use braces { } to remove ambiguity.")

    state = self.get_state()
    rule_thickness = state.font_output.get_underline_thickness(
        state.font, state.fontsize, state.dpi)
    xHeight = state.font_output.get_xheight(
        state.font, state.fontsize, state.dpi)

    # Handle over/under symbols, such as sum or integral
    if self.is_overunder(nucleus):
        vlist = []
        shift = 0.
        width = nucleus.width
        # Scripts render one size smaller, centered to the widest part.
        if super is not None:
            super.shrink()
            width = max(width, super.width)
        if sub is not None:
            sub.shrink()
            width = max(width, sub.width)

        if super is not None:
            hlist = HCentered([super])
            hlist.hpack(width, 'exactly')
            vlist.extend([hlist, Kern(rule_thickness * 3.0)])
        hlist = HCentered([nucleus])
        hlist.hpack(width, 'exactly')
        vlist.append(hlist)
        if sub is not None:
            hlist = HCentered([sub])
            hlist.hpack(width, 'exactly')
            vlist.extend([Kern(rule_thickness * 3.0), hlist])
            shift = hlist.height + hlist.depth + rule_thickness * 2.0
        vlist = Vlist(vlist)
        vlist.shift_amount = shift + nucleus.depth * 0.5
        result = Hlist([vlist])
        return [result]

    # Handle regular sub/superscripts
    shift_up = nucleus.height - SUBDROP * xHeight
    if self.is_dropsub(nucleus):
        shift_down = nucleus.depth + SUBDROP * xHeight
    else:
        shift_down = SUBDROP * xHeight
    if super is None:
        # node757
        sub.shrink()
        x = Hlist([sub])
        # x.width += SCRIPT_SPACE * xHeight
        shift_down = max(shift_down, SUB1)
        clr = x.height - (abs(xHeight * 4.0) / 5.0)
        shift_down = max(shift_down, clr)
        x.shift_amount = shift_down
    else:
        super.shrink()
        x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
        # x.width += SCRIPT_SPACE * xHeight
        clr = SUP1 * xHeight
        shift_up = max(shift_up, clr)
        clr = x.depth + (abs(xHeight) / 4.0)
        shift_up = max(shift_up, clr)

        if sub is None:
            x.shift_amount = -shift_up
        else: # Both sub and superscript
            sub.shrink()
            y = Hlist([sub])
            # y.width += SCRIPT_SPACE * xHeight
            shift_down = max(shift_down, SUB1 * xHeight)
            # Enforce a minimum vertical gap between the two scripts,
            # splitting any needed correction between up and down.
            clr = (2.0 * rule_thickness -
                   ((shift_up - x.depth) - (y.height - shift_down)))
            if clr > 0.:
                shift_up += clr
                shift_down += clr
            if self.is_slanted(nucleus):
                x.shift_amount = DELTA * (shift_up + shift_down)
            x = Vlist([x,
                       Kern((shift_up - x.depth) - (y.height - shift_down)),
                       y])
            x.shift_amount = shift_down

    result = Hlist([nucleus, x])
    return [result]
def frac(self, s, loc, toks):
    """Parse action for a fraction: numerator over a rule over the
    denominator, shifted so the rule aligns with the '=' axis."""
    assert(len(toks)==1)
    assert(len(toks[0])==2)
    state = self.get_state()
    thickness = state.font_output.get_underline_thickness(
        state.font, state.fontsize, state.dpi)
    num, den = toks[0]
    # Both parts render one size smaller, centered to a common width.
    num.shrink()
    den.shrink()
    cnum = HCentered([num])
    cden = HCentered([den])
    width = max(num.width, den.width) + thickness * 10.
    cnum.hpack(width, 'exactly')
    cden.hpack(width, 'exactly')
    vlist = Vlist([cnum,                      # numerator
                   Vbox(0, thickness * 2.0),  # space
                   Hrule(state),              # rule
                   Vbox(0, thickness * 4.0),  # space
                   cden                       # denominator
                   ])

    # Shift so the fraction line sits in the middle of the
    # equals sign
    metrics = state.font_output.get_metrics(
        state.font, 'it', '=', state.fontsize, state.dpi)
    shift = (cden.height -
             ((metrics.ymax + metrics.ymin) / 2 -
              thickness * 3.0))
    vlist.shift_amount = shift

    hlist = Hlist([vlist, Hbox(thickness * 2.)])
    return [hlist]
def sqrt(self, s, loc, toks):
    """Parse action for a radical: an auto-height radical sign next to
    the body under an hrule, with an optional root above the tick."""
    #~ print "sqrt", toks
    root, body = toks[0]
    state = self.get_state()
    thickness = state.font_output.get_underline_thickness(
        state.font, state.fontsize, state.dpi)

    # Determine the height of the body, and add a little extra to
    # the height so it doesn't seem cramped
    height = body.height - body.shift_amount + thickness * 5.0
    depth = body.depth + body.shift_amount
    check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
    height = check.height - check.shift_amount
    depth = check.depth + check.shift_amount

    # Put a little extra space to the left and right of the body
    padded_body = Hlist([Hbox(thickness * 2.0),
                         body,
                         Hbox(thickness * 2.0)])
    rightside = Vlist([Hrule(state),
                       Fill(),
                       padded_body])
    # Stretch the glue between the hrule and the body
    rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                    depth, 'exactly')

    # Add the root and shift it upward so it is above the tick.
    # The value of 0.6 is a hard-coded hack ;)
    if root is None:
        # No explicit root: reserve half the radical's width as space.
        root = Box(check.width * 0.5, 0., 0.)
    else:
        # Render the root two sizes smaller than the surrounding math.
        root = Hlist([Char(x, state) for x in root])
        root.shrink()
        root.shrink()

    root_vlist = Vlist([Hlist([root])])
    root_vlist.shift_amount = -height * 0.6

    hlist = Hlist([root_vlist,               # Root
                   # Negative kerning to put root over tick
                   Kern(-check.width * 0.5),
                   check,                    # Check
                   rightside])               # Body
    return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
    """Parse action for \\left ... \\right: grow both delimiters to
    cover the height/depth of the enclosed material."""
    #~ print "auto_sized_delimiter", toks
    front, middle, back = toks
    state = self.get_state()
    height = max([x.height for x in middle])
    depth = max([x.depth for x in middle])
    parts = []
    # \left. and \right. aren't supposed to produce any symbols
    if front != '.':
        parts.append(AutoHeightChar(front, height, depth, state))
    parts.extend(middle.asList())
    if back != '.':
        parts.append(AutoHeightChar(back, height, depth, state))
    hlist = Hlist(parts)
    return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
    """Parse mathtext strings and render them through a
    backend-specific font/output pipeline.

    The expensive pyparsing grammar (``Parser``) is built once and
    shared across all instances via the ``_parser`` class attribute;
    parse results are cached per-instance.
    """
    _parser = None

    # Maps backend name -> mathtext backend class used for output.
    _backend_mapping = {
        'bitmap': MathtextBackendBitmap,
        'agg'   : MathtextBackendAgg,
        'ps'    : MathtextBackendPs,
        'pdf'   : MathtextBackendPdf,
        'svg'   : MathtextBackendSvg,
        'cairo' : MathtextBackendCairo,
        'macosx': MathtextBackendAgg,
        }

    # Maps rcParams['mathtext.fontset'] values -> font set class.
    _font_type_mapping = {
        'cm'       : BakomaFonts,
        'stix'     : StixFonts,
        'stixsans' : StixSansFonts,
        'custom'   : UnicodeFonts
        }

    def __init__(self, output):
        """
        Create a MathTextParser for the given backend *output*.
        """
        self._output = output.lower()
        # Bounded cache of parse results keyed by (string, dpi, prop).
        self._cache = maxdict(50)

    def parse(self, s, dpi = 72, prop = None):
        """
        Parse the given math expression *s* at the given *dpi*.  If
        *prop* is provided, it is a
        :class:`~matplotlib.font_manager.FontProperties` object
        specifying the "default" font to use in the math expression,
        used for all non-math text.

        The results are cached, so multiple calls to :meth:`parse`
        with the same expression should be fast.

        Raises ValueError for an unknown mathtext.fontset setting.
        """
        if prop is None:
            prop = FontProperties()

        cacheKey = (s, dpi, hash(prop))
        result = self._cache.get(cacheKey)
        if result is not None:
            return result

        # Select the font machinery: standard PS fonts when using the
        # PS backend with AFM fonts, otherwise the configured fontset.
        if self._output == 'ps' and rcParams['ps.useafm']:
            font_output = StandardPsFonts(prop)
        else:
            backend = self._backend_mapping[self._output]()
            fontset = rcParams['mathtext.fontset']
            fontset_class = self._font_type_mapping.get(fontset.lower())
            if fontset_class is not None:
                font_output = fontset_class(prop, backend)
            else:
                raise ValueError(
                    "mathtext.fontset must be either 'cm', 'stix', "
                    "'stixsans', or 'custom'")

        fontsize = prop.get_size_in_points()

        # This is a class variable so we don't rebuild the parser
        # with each request.
        if self._parser is None:
            self.__class__._parser = Parser()

        box = self._parser.parse(s, font_output, fontsize, dpi)
        font_output.set_canvas_size(box.width, box.height, box.depth)
        result = font_output.get_results(box)
        self._cache[cacheKey] = result

        # Free up the transient data structures
        self._parser.clear()

        # Fix cyclical references
        font_output.destroy()
        font_output.mathtext_backend.fonts_object = None
        font_output.mathtext_backend = None

        return result

    def to_mask(self, texstr, dpi=120, fontsize=14):
        """
        Rasterize *texstr* and return its alpha mask.

        *texstr*
            A valid mathtext string, eg r'IQ: $\sigma_i=15$'

        *dpi*
            The dots-per-inch to render the text

        *fontsize*
            The font size in points

        Returns a tuple (*array*, *depth*)

        - *array* is an NxM uint8 alpha ubyte mask array of
          rasterized tex.

        - depth is the offset of the baseline from the bottom of the
          image in pixels.
        """
        # Only the bitmap backend produces a rasterized ftimage.
        assert(self._output=="bitmap")
        prop = FontProperties(size=fontsize)
        ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)

        x = ftimage.as_array()
        return x, depth

    def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
        """
        Rasterize *texstr* as an RGBA image in the given *color*.

        *texstr*
            A valid mathtext string, eg r'IQ: $\sigma_i=15$'

        *color*
            Any matplotlib color argument

        *dpi*
            The dots-per-inch to render the text

        *fontsize*
            The font size in points

        Returns a tuple (*array*, *depth*)

        - *array* is an NxMx4 RGBA array of rasterized tex.

        - depth is the offset of the baseline from the bottom of the
          image in pixels.
        """
        x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)

        # Fill the RGB channels with the flat color; the mask becomes
        # the alpha channel.
        r, g, b = mcolors.colorConverter.to_rgb(color)
        RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
        RGBA[:,:,0] = int(255*r)
        RGBA[:,:,1] = int(255*g)
        RGBA[:,:,2] = int(255*b)
        RGBA[:,:,3] = x
        return RGBA, depth

    def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
        """
        Writes a tex expression to a PNG file.

        *filename*
            A writable filename or fileobject

        *texstr*
            A valid mathtext string, eg r'IQ: $\sigma_i=15$'

        *color*
            A valid matplotlib color argument

        *dpi*
            The dots-per-inch to render the text

        *fontsize*
            The font size in points

        Returns the offset of the baseline from the bottom of the
        image in pixels.
        """
        rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
        numrows, numcols, tmp = rgba.shape
        _png.write_png(rgba.tostring(), numcols, numrows, filename)
        return depth

    def get_depth(self, texstr, dpi=120, fontsize=14):
        """
        Returns the offset of the baseline from the bottom of the
        image in pixels.

        *texstr*
            A valid mathtext string, eg r'IQ: $\sigma_i=15$'

        *dpi*
            The dots-per-inch to render the text

        *fontsize*
            The font size in points
        """
        # Bitmap-only, like to_mask(); only the depth is returned.
        assert(self._output=="bitmap")
        prop = FontProperties(size=fontsize)
        ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
        return depth
| agpl-3.0 |
hncg/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
# Usage: extract_topic.py directory [n_topic] [n_top_words]
# Reads every *.txt file in the directory (one document per line),
# tokenizes with jieba, then extracts topics via NMF over tf-idf and
# prints the top words of each topic.
if len(sys.argv)<2:
    print("usage: extract_topic.py directory [n_topic] [n_top_words]")
    sys.exit(0)

n_topic = 10       # default number of topics
n_top_words = 25   # default number of words printed per topic

if len(sys.argv)>2:
    n_topic = int(sys.argv[2])

if len(sys.argv)>3:
    n_top_words = int(sys.argv[3])

count_vect = CountVectorizer()
docs = []

pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)

for f_name in glob.glob(pattern):
    with open(f_name) as f:
        print("read file:", f_name)
        for line in f: #one line as a document
            # Space-join jieba tokens so CountVectorizer can split them.
            words = " ".join(jieba.cut(line))
            docs.append(words)

random.shuffle(docs)

print("read done.")

print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)

print(tfidf.shape)

t0 = time.time()
print("training...")

nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))

# Inverse the vectorizer vocabulary to be able to map component
# indices back to the corresponding words.
feature_names = count_vect.get_feature_names()

for topic_idx, topic in enumerate(nmf.components_):
    print("Topic #%d:" % topic_idx)
    # argsort ascending, then take the last n_top_words in reverse.
    print(" ".join([feature_names[i]
                    for i in topic.argsort()[:-n_top_words - 1:-1]]))
    print("")
| mit |
kjung/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.

# Hastie 10.2 synthetic problem: first 2000 samples train, rest test.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)

X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]

# Baseline 1: a single decision stump.
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)

# Baseline 2: a deeper single decision tree.
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)

# Discrete AdaBoost (SAMME) over stumps.
ada_discrete = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME")
ada_discrete.fit(X_train, y_train)

# Real AdaBoost (SAMME.R) over stumps.
ada_real = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME.R")
ada_real.fit(X_train, y_train)

fig = plt.figure()
ax = fig.add_subplot(111)

# Horizontal reference lines for the two single-model baselines.
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
        label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
        label='Decision Tree Error')

# Staged (per-iteration) error curves for both boosting variants.
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
    ada_discrete_err[i] = zero_one_loss(y_pred, y_test)

ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
    ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)

ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
    ada_real_err[i] = zero_one_loss(y_pred, y_test)

ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
    ada_real_err_train[i] = zero_one_loss(y_pred, y_train)

ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
        label='Discrete AdaBoost Test Error',
        color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
        label='Discrete AdaBoost Train Error',
        color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
        label='Real AdaBoost Test Error',
        color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
        label='Real AdaBoost Train Error',
        color='green')

ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')

leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)

plt.show()
| bsd-3-clause |
sbussmann/Bussmann2015 | Code/fluxplot.py | 2 | 2222 | """
2014 July 16
Shane Bussmann
Plot the distribution of fluxdensities for the ALMA sample. Compare total
observed flux (what a single-dish telescope with 20" FWHM resolution would see)
with the individual observed flux (accounting for blending) and with the
individual intrinsic flux (accounting for lensing).
"""
import matplotlib.pyplot as plt
import getfluxes
import numpy
from pylab import savefig
def getstat(fluxdist):
    """Return the (mean, standard deviation) of a flux distribution."""
    return fluxdist.mean(), fluxdist.std()
# f1: total observed flux density
# f2: per-source observed flux density
# f3: per-source intrinsic flux density
f1, f2, f3 = getfluxes.all('uvfit25', mu_estimate=True)

# Common histogram range spanning all three distributions.
fnu = f1['fnu']
fnu = numpy.append(fnu, f2['fnu'])
fnu = numpy.append(fnu, f3['fnu'])

m1 = 0
m2 = 1 + int(fnu.max())

# --- Observed, unresolved (single-dish view): hatched outline ---
binwidth = 2
bins = numpy.arange(m1, m2 + binwidth, binwidth)
fluxarr = f1['fnu']
plt.hist(fluxarr, bins = bins, histtype='stepfilled', edgecolor='black',
        hatch='///', facecolor='none', label='Observed, Unresolved')
fmean, fstd = getstat(fluxarr)
strmean = '{0:.1f}'.format(fmean)
strstd = '{0:.1f}'.format(fstd)
s1 = 'Observed, Unresolved <S_870> = ' + strmean + ' +/- ' + strstd
plt.text(65, 13, s1, ha='right', fontsize='large')

# --- Observed, resolved (per-source, blended flux split): red ---
binwidth = 2
bins = numpy.arange(m1, m2 + binwidth, binwidth)
fluxarr = f2['fnu']
plt.hist(fluxarr, bins = bins, alpha=0.5, histtype='stepfilled',
        color='red', label='Observed, Resolved')
fmean, fstd = getstat(fluxarr)
strmean = '{0:.1f}'.format(fmean)
strstd = '{0:.1f}'.format(fstd)
s1 = 'Observed, Resolved <S_870> = ' + strmean + ' +/- ' + strstd
plt.text(65, 11, s1, ha='right', fontsize='large')

# --- Intrinsic, resolved (lensing magnification removed): blue ---
binwidth = 2
fluxarr = f3['fnu']# / mu
bins = numpy.arange(m1, m2 + binwidth, binwidth)
plt.hist(fluxarr, bins = bins, alpha=0.2, histtype='stepfilled',
        color='blue', label='Intrinsic, Resolved')
fmean, fstd = getstat(fluxarr)
strmean = '{0:.1f}'.format(fmean)
strstd = '{0:.1f}'.format(fstd)
s1 = 'Intrinsic, Resolved <S_870> = ' + strmean + ' +/- ' + strstd
plt.text(65, 9, s1, ha='right', fontsize='large')

plt.xlabel(r'$S_{870} \, ({\rm mJy})$', fontsize='x-large')
plt.ylabel('N', fontsize='x-large')
plt.tight_layout()
plt.legend()
savefig('fluxdist.png')
| mit |
lin-credible/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Paths to the fixture files shipped alongside this test module.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")


def test_load_svmlight_file():
    # Basic fixture: check shape, stored values, mutability and labels.
    X, y = load_svmlight_file(datafile)

    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)

    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)

    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)

    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])


def test_load_svmlight_file_fd():
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)

    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        os.close(fd)


def test_load_svmlight_file_multilabel():
    # Multilabel targets come back as one tuple of labels per sample.
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])


def test_load_svmlight_files():
    # Loading the same file repeatedly must give identical data, with
    # the requested dtype applied to every returned matrix.
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)

    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    # An explicit n_features larger than the file pads the matrix;
    # asking for fewer features than the file contains is an error.
    X, y = load_svmlight_file(datafile, n_features=22)

    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)

    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    # Round-trip the plain datafile through gzip and bz2 and check that
    # load_svmlight_file transparently decompresses both formats.
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        # Fix: close the compressed writer explicitly (context manager).
        # The original leaked the gzip handle, which can leave the output
        # truncated/unflushed on interpreters without refcounting GC.
        with open(datafile, "rb") as f, gzip.open(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        # Same fix for the BZ2File writer.
        with open(datafile, "rb") as f, BZ2File(tmp.name, "wb") as fout:
            shutil.copyfileobj(f, fout)
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    # A malformed fixture must raise.
    load_svmlight_file(invalidfile)


@raises(ValueError)
def test_load_invalid_order_file():
    # Feature indices out of ascending order must raise.
    load_svmlight_file(invalidfile2)


@raises(ValueError)
def test_load_zero_based():
    # zero_based=False forbids a 0-based feature index in the data.
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)


def test_load_zero_based_auto():
    # zero_based="auto" infers the indexing convention; with several
    # files the inference must be applied consistently to all of them.
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")

    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))

    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))


def test_load_with_qid():
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    # query_id=False drops the qid column entirely.
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    # query_id=True returns it as a third array, from either loader.
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])


@raises(ValueError)
def test_load_invalid_file2():
    # One invalid file in the list poisons the whole multi-file load.
    load_svmlight_files([datafile, invalidfile, datafile])


@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)


@raises(IOError)
def test_invalid_filename():
    load_svmlight_file("trou pic nic douille")
def test_dump():
    # Round-trip dump/load for dense, sparse, and sliced-sparse inputs,
    # over both indexing conventions and several dtypes.
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()

    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]

    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)

                # First header line carries the scikit-learn version...
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)

                # ...second one records the indexing convention used.
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)

                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)

                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_multilabel():
    # Multilabel y (one 0/1 indicator list per sample) is written as a
    # comma-separated list of label indices on each line.
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    f = BytesIO()
    dump_svmlight_file(X, y, f, multilabel=True)
    f.seek(0)
    # make sure it dumps multilabel correctly
    assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
    assert_equal(f.readline(), b("0,2 \n"))
    assert_equal(f.readline(), b("0,1 1:5 3:1\n"))


def test_dump_concise():
    # Values must be written in the most concise form that round-trips:
    # integers without a trailing '.0', full precision only when needed,
    # exponent notation for very large magnitudes.
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id
    """Round-trip X, y and a per-sample query_id column through
    dump_svmlight_file / load_svmlight_file."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    # two consecutive samples share each query id
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
v0devil/jltom | datagenerator_py3.py | 1 | 38469 | from collections import OrderedDict
import json
import logging
#from pylab import *
import numpy as np
import pandas as pd
import sys
import re
import os
import zipfile
import sqlalchemy
import shutil
import time
import datetime
import argparse
from xml.etree.ElementTree import ElementTree
from os.path import basename
from sqlalchemy import create_engine
from sqlalchemy.sql import select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine import reflection
from itertools import islice
# --- Module-level setup: logging, DB connection, table handles, session. ---
# NOTE(review): connection string with credentials is hard-coded — consider
# moving it to configuration.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
db_engine = create_engine('postgresql://postgres:1234@localhost:5432/jmeter')
db_connection = db_engine.connect()
# Reflect the existing 'jltc' schema so table objects below match the live DB.
meta = sqlalchemy.MetaData(bind=db_connection, reflect=True, schema="jltc")
insp = reflection.Inspector.from_engine(db_engine)
# NOTE(review): this raises IndexError when no CLI argument is given; it is
# later overridden by parse_args() anyway.
project_name = sys.argv[1]
# Handles to the reflected tables used throughout the script.
project = meta.tables['jltc.project']
test = meta.tables['jltc.test']
test_data = meta.tables['jltc.test_data']
action = meta.tables['jltc.action']
test_action_data = meta.tables['jltc.test_action_data']
server = meta.tables['jltc.server']
server_monitoring_data = meta.tables['jltc.server_monitoring_data']
test_aggregate = meta.tables['jltc.test_aggregate']
test_action_aggregate_data = meta.tables['jltc.test_action_aggregate_data']
user = meta.tables['jltc.user']
project_graphite_settings = meta.tables['jltc.project_graphite_settings']
error = meta.tables['jltc.error']
test_error = meta.tables['jltc.test_error']
Session = sessionmaker(bind=db_engine)
db_session = Session()
# Disabled wholesale-cleanup block kept for manual use.
""" stm = test_error.delete()
result = db_session.execute(stm)
stm = error.delete()
result = db_session.execute(stm)
stm = server_monitoring_data.delete()
result = db_session.execute(stm)
stm = test_aggregate.delete()
result = db_session.execute(stm)
stm = test_action_data.delete()
result = db_session.execute(stm)
stm = test_data.delete()
result = db_session.execute(stm)
stm = test.delete()
result = db_session.execute(stm)
stm = action.delete()
result = db_session.execute(stm)
stm = server.delete()
result = db_session.execute(stm)
stm = project.delete()
result = db_session.execute(stm)
stm = test_action_aggregate_data.delete()
result = db_session.execute(stm)
db_session.commit() """
logger.info("Starting data generating script.")
def parse_args(argv=None):
    """Parse the command-line options of the script.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to ``None``, in which case
        ``sys.argv[1:]`` is used — identical to the original behaviour.
        Accepting an explicit list makes the function testable.

    Returns
    -------
    argparse.Namespace
        With attributes ``project_name`` and ``jenkins_base_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project-name', default='test')
    parser.add_argument('--jenkins-base-dir', default='/var/lib/jenkins/')
    return parser.parse_args(argv)
def percentile(n):
    """Build a named aggregation callable for the *n*-th percentile.

    The returned function computes ``np.percentile(values, n)`` and carries
    the name ``percentile_<n>`` so pandas aggregation output columns are
    labelled meaningfully.
    """
    def _agg(values):
        return np.percentile(values, n)
    _agg.__name__ = 'percentile_%s' % n
    return _agg
def mask(df, f):
    """Filter helper: return the rows of *df* selected by the predicate *f*.

    *f* receives the DataFrame and must return a boolean selector.
    """
    selector = f(df)
    return df[selector]
def getIndex(item):
    """Extract the build number from a results-file record.

    *item* is a list whose first element is a path; the first run of digits
    followed by '/' (after normalising backslashes) is returned as an int.
    Used as a sort key over ``data_to_analyze``.
    """
    normalized = item[0].replace('\\', '/')
    return int(re.search(r'(\d+)/', normalized).group(1))
def ord_to_char(v, p=None):
    """Map a numeric code point to its character.

    The second argument exists so the function matches the
    ``(value, position)`` signature expected by tick formatters; it is
    ignored.
    """
    code_point = int(v)
    return chr(code_point)
def get_dir_size(path):
    """Return the total size in bytes of all files under *path*,
    recursively, skipping files named exactly 'checksum'."""
    total = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        total += sum(
            os.path.getsize(os.path.join(dirpath, name))
            for name in filenames
            if name != 'checksum'
        )
    return total
def zip_results_file(file):
    """Replace *file* with a deflate-compressed ``<file>.zip`` archive.

    Any pre-existing archive of the same name is removed first; the
    original file is deleted once it has been stored in the archive.
    """
    archive = file + '.zip'
    if os.path.exists(archive):
        os.remove(archive)
    logger.info("Move results file " + file + " to zip archive")
    with zipfile.ZipFile(
            archive, "w", zipfile.ZIP_DEFLATED,
            allowZip64=True) as zip_file:
        zip_file.write(file, basename(file))
    os.remove(file)
    logger.info("File was packed, original file was deleted")
def zip_dir(dirPath, zipPath):
    """Archive the contents of *dirPath* into the zip file *zipPath*.

    Entry names are the file paths with the *dirPath* prefix stripped, so
    the archive unpacks relative to the directory root.

    Parameters
    ----------
    dirPath : str
        Directory whose files (recursively) are archived.
    zipPath : str
        Destination zip file path.
    """
    lenDirPath = len(dirPath)
    # `with` guarantees the archive is finalized even if a write raises;
    # the original leaked the handle (no close) on exception.
    with zipfile.ZipFile(zipPath, mode='w', allowZip64=True) as zipf:
        for root, _, files in os.walk(dirPath):
            for file in files:
                filePath = os.path.join(root, file)
                zipf.write(filePath, filePath[lenDirPath:])
def execute_db_stmt(stm, data):
    """Execute a SQLAlchemy statement on the shared connection.

    Parameters
    ----------
    stm : SQLAlchemy executable statement
        Insert/update/delete to run.
    data :
        The payload being written; logged for diagnostics on failure.

    Returns
    -------
    The execution result, or ``False`` if the statement failed.
    """
    try:
        result = db_connection.execute(stm)
    except (sqlalchemy.exc.DataError, sqlalchemy.exc.StatementError,
            TypeError) as e:
        # BUG FIX: the original logged the module-level name ``url`` here,
        # which may be unbound (NameError inside the handler) and was wrong
        # for the many non-action inserts routed through this helper.
        logger.error("Cannot execute statement: {}".format(stm))
        logger.error("Data: {}".format(data))
        logger.error("Exception {}".format(e))
        result = False
    return result
# --- Stage 1: walk the Jenkins builds directory, registering projects,
# users and tests in the DB and collecting per-build work items. ---
data_to_analyze = []
releases = []
build_xml = ElementTree()
args = parse_args()
if args.project_name:
    project_name = args.project_name
if args.jenkins_base_dir:
    jenkins_base_dir = args.jenkins_base_dir
builds_dir = os.path.join(jenkins_base_dir, 'jobs', project_name,
                          'builds').replace('\\', '/')
# NOTE(review): the '.' is unescaped, so this matches any char before 'jtl'.
rx = re.compile(r'.+?.jtl')
logger.info('Check builds directory: {}'.format(builds_dir))
for root, dirs, files in os.walk(builds_dir):
    for file in files:
        logger.info(file)
        if re.match(rx, os.path.join(root, file)):
            # only consider non-empty result files
            if os.stat(os.path.join(root, file)).st_size > 0:
                root = root.replace('\\', '/')
                build_parameters = []
                display_name = "unknown"
                description = ""
                start_time = 0
                duration = 0
                # companion artifacts expected next to the .jtl file
                monitoring_data = os.path.join(root, "monitoring.data")
                errors_data = os.path.join(root, "errors")
                build_xml_path = os.path.join(root, "build.xml")
                if os.path.isfile(build_xml_path):
                    logger.info(
                        "Try to parse Jenkins build XML-file: {0}".format(
                            build_xml_path))
                    # strip numeric character-reference prefixes that break
                    # the XML parser, rewriting the file in place
                    with open(build_xml_path, "r") as fixfile:
                        data = fixfile.read()
                    data = data.replace("&#x", "")
                    with open(build_xml_path, "w") as fixfile:
                        fixfile.write(data)
                    build_xml.parse(build_xml_path)
                    build_tag = build_xml.getroot()
                    for params in build_tag:
                        if params.tag == 'actions':
                            # collect build parameters as {name: value} dicts
                            parameters = params.find('.//parameters')
                            for parameter in parameters:
                                name = parameter.find('name')
                                value = parameter.find('value')
                                build_parameters.append({
                                    name.text: value.text
                                })
                            userId = params.find('.//userId')
                            if userId is not None:
                                started_by = userId.text
                                if db_session.query(user.c.id).filter(
                                        user.c.login ==
                                        started_by).count() == 0:
                                    logger.info(
                                        "Adding new user: {0}".format(
                                            started_by))
                                    stm = user.insert().values(
                                        login=started_by)
                                    result = db_connection.execute(stm)
                                user_id = db_session.query(user.c.id). \
                                    filter(user.c.login == started_by).scalar()
                            else:
                                # no userId in the build XML: fall back to
                                # user id 1 (presumably a system user —
                                # TODO confirm)
                                user_id = 1
                        elif params.tag == 'startTime':
                            start_time = int(params.text)
                        elif params.tag == 'displayName':
                            display_name = params.text
                        elif params.tag == 'duration':
                            duration = int(params.text)
                        elif params.tag == 'description':
                            description = params.text
                if "Performance_HTML_Report" not in os.path.join(root, file):
                    data_to_analyze.append([
                        os.path.join(root, file), monitoring_data, errors_data,
                        display_name, build_parameters, root
                    ])
                    # derive the project name from the path segment before
                    # '/builds'
                    project_name = re.search('/([^/]+)/builds', root).group(1)
                    if db_session.query(project.c.id). \
                            filter(project.c.project_name == project_name).count() == 0:
                        logger.info(
                            "Adding new project: {0}".format(project_name))
                        stm = project.insert().values(
                            project_name=project_name, show=True)
                        result = db_connection.execute(stm)
                    project_id = db_session.query(project.c.id). \
                        filter(project.c.project_name == project_name).scalar()
                    # register the test row once per build directory
                    if db_session.query(test.c.path).filter(
                            test.c.path == root).count() == 0:
                        logger.info("Was found new test data, adding.")
                        build_number = int(
                            re.search('/builds/(\d+)', root).group(1))
                        end_time = start_time + duration
                        if start_time == end_time:
                            # zero duration: assume the test just finished
                            end_time = int(time.time() * 1000)
                        stm = test.insert().values(
                            path=root,
                            display_name=display_name,
                            description=description,
                            parameters=build_parameters,
                            project_id=project_id,
                            start_time=start_time,
                            end_time=end_time,
                            build_number=build_number,
                            started_by_id=user_id,
                            data_resolution='1Min',
                            show=True)
                        result = db_connection.execute(stm)
# --- Stage 2 setup: order work items newest-build-first and prepare
# shared state for the parsing loops below. ---
data_to_analyze = sorted(data_to_analyze, key=getIndex, reverse=True)
releases.sort()
# vectorized epoch-seconds -> datetime converter used for all timestamp cols
dateconv = np.vectorize(datetime.datetime.fromtimestamp)
aggregate_table = 'aggregate_table'
monitor_table = 'monitor_table'
agg = {}
mon = {}
rtot_over_releases = []
cpu_over_releases = []
file_index = 0
logger.info("Trying to open CSV-files")
# build directory (element 5 of each work item) per test, in the same order
build_roots = [data_to_analyze[i][5] for i in range(0, len(data_to_analyze))]
logger.info(data_to_analyze)
# --- Stage 2: parse each JMeter .jtl results file into per-action and
# per-test time series plus aggregate rows. Files over ~1 GB are streamed
# in chunks; smaller files are loaded whole. ---
for d_ in data_to_analyze:
    build_root = d_[5]
    logger.info("Current build directory:" + build_root)
    test_id = db_session.query(
        test.c.id).filter(test.c.path == build_root).scalar()
    project_id = db_session.query(
        test.c.project_id).filter(test.c.id == test_id).scalar()
    checksum = -1
    # skip tests whose data was already ingested
    if db_session.query(test_data.c.test_id).filter(
            test_data.c.test_id == test_id).count() == 0:
        df = pd.DataFrame()
        jmeter_results_file = d_[0]
        if not os.path.exists(jmeter_results_file):
            logger.info("Results file does not exists, try to check archive")
            jmeter_results_zip = jmeter_results_file + ".zip"
            if os.path.exists(jmeter_results_zip):
                logger.info("Archive file was found: " + jmeter_results_zip)
                with zipfile.ZipFile(jmeter_results_zip, "r") as z:
                    z.extractall(build_root)
        logger.info("Executing a new parse: " + jmeter_results_file +
                    " size: " + str(os.stat(jmeter_results_file).st_size))
        # ~1 GB threshold: stream huge files in 6M-row chunks
        if os.stat(jmeter_results_file).st_size > 1000007777:
            logger.info("Executing a parse for a huge file")
            chunks = pd.read_table(
                jmeter_results_file, sep=',', index_col=0, chunksize=6000000)
            parse_task = 0
            for chunk in chunks:
                parse_task += 1
                logger.info("Chunk #{}".format(parse_task))
                chunk.columns = [
                    'response_time', 'url', 'responseCode', 'success',
                    'threadName', 'failureMessage', 'grpThreads', 'allThreads'
                ]
                # drop samplers explicitly marked for exclusion
                chunk = chunk[~chunk['url'].str.contains('exclude_')]
                #chunk = chunk[np.abs(chunk['response_time']-chunk['response_time'].mean())<=(3*chunk['response_time'].std())] #keep only the ones that are within +3 to -3 standard deviations
                #convert timestamps to normal date/time
                chunk.index = pd.to_datetime(
                    dateconv((chunk.index.values / 1000)))
                num_lines = chunk['response_time'].count()
                logger.info("Number of lines in chunk: %d." % num_lines)
                unique_urls = chunk['url'].unique()
                logger.info("Actions in the chunk: {}".format(
                    str(unique_urls)))
                for url in unique_urls:
                    # register the action once per project
                    if db_session.query(action.c.id).filter(action.c.url == url).\
                            filter(action.c.project_id == project_id).count() == 0:
                        logger.info(
                            "Adding new action with URL: {}".format(url))
                        stm = action.insert().values(
                            url=url,
                            project_id=project_id,
                        )
                        result = execute_db_stmt(stm, url)
                    action_id = db_session.query(action.c.id).filter(action.c.url == url). \
                        filter(action.c.project_id == project_id).scalar()
                    logger.info("Adding data for action: {}".format(url))
                    df_url = chunk[(chunk.url == url)]
                    n = df_url.shape[0]
                    freq = '1Min'
                    if n > 10:
                        # drop outliers beyond 3 standard deviations
                        df_url = df_url[np.abs(df_url['response_time'] -
                                               df_url['response_time'].mean())
                                        <= (3 * df_url['response_time'].std())]
                    # resample per-minute statistics for this action
                    url_data = pd.DataFrame()
                    df_url_gr_by_ts = df_url.groupby(pd.Grouper(freq=freq))
                    url_data['avg'] = df_url_gr_by_ts.response_time.mean()
                    url_data['median'] = df_url_gr_by_ts.response_time.median()
                    url_data['count'] = df_url_gr_by_ts.success.count()
                    del df_url_gr_by_ts
                    df_url_gr_by_ts_only_errors = df_url[(
                        df_url.success == False)].groupby(
                            pd.Grouper(freq=freq))
                    try:
                        url_data['errors'] = float(
                            df_url_gr_by_ts_only_errors.success.count())
                    except (ValueError, TypeError) as e:
                        url_data['errors'] = 0
                    url_data['test_id'] = test_id
                    url_data['url'] = url
                    output_json = json.loads(
                        url_data.to_json(orient='index', date_format='iso'),
                        object_pairs_hook=OrderedDict)
                    del url_data
                    # one JSONB row per time bucket
                    for row in output_json:
                        data = {
                            'timestamp': row,
                            'avg': float(output_json[row]['avg']),
                            'median': float(output_json[row]['median']),
                            'count': int(output_json[row]['count']),
                            'url': output_json[row]['url'],
                            'errors': int(output_json[row]['errors']),
                            'test_id': int(output_json[row]['test_id']),
                        }
                        stm = test_action_data.insert().values(
                            test_id=output_json[row]['test_id'],
                            action_id=action_id,
                            data_resolution_id=1,
                            data=data)
                        result = execute_db_stmt(stm, data)
                    # aggregate stats for the chunk's slice of this action
                    url_agg_data = dict(
                        json.loads(
                            df_url['response_time'].describe().to_json()))
                    url_agg_data['99%'] = df_url['response_time'].quantile(.99)
                    url_agg_data['90%'] = df_url['response_time'].quantile(.90)
                    # 'weight' = sum of response times, used to recombine means
                    url_agg_data['weight'] = float(
                        df_url['response_time'].sum())
                    url_agg_data['errors'] = float(df_url[(
                        df_url['success'] == False)]['success'].count())
                    logger.info("Check aggregate data: {} {}".format(
                        test_id, action_id))
                    if db_session.query(
                            test_action_aggregate_data.c.id).filter(
                                test_action_aggregate_data.c.test_id == test_id
                            ).filter(test_action_aggregate_data.c.action_id ==
                                     action_id).count() == 0:
                        try:
                            stm = test_action_aggregate_data.insert().values(
                                test_id=test_id,
                                action_id=action_id,
                                data=url_agg_data)
                            result = db_connection.execute(stm)
                        except (sqlalchemy.exc.DataError,
                                sqlalchemy.exc.StatementError, TypeError) as e:
                            logger.error("Data: {}".format(url_agg_data))
                            logger.error("Exception {}".format(e))
                    else:
                        # merge this chunk's aggregate into the stored one:
                        # weighted mean via summed 'weight'/'count'; max of
                        # maxima, min of minima. NOTE(review): percentiles are
                        # merged by taking the larger value and 'std' is simply
                        # overwritten — approximations, not exact statistics.
                        data = {}
                        old_data = db_session.query(
                            test_action_aggregate_data.c.data).filter(
                                test_action_aggregate_data.c.test_id == test_id
                            ).filter(test_action_aggregate_data.c.action_id ==
                                     action_id).one()
                        new_data = url_agg_data
                        logger.info("old_data")
                        logger.info(old_data)
                        logger.info("new_data")
                        logger.info(new_data)
                        maximum = new_data[
                            'max'] if new_data['max'] > old_data[0]['max'] else old_data[
                                0]['max']
                        minimum = new_data[
                            'min'] if new_data['min'] < old_data[0]['min'] else old_data[
                                0]['min']
                        p50 = new_data[
                            '50%'] if new_data['50%'] > old_data[0]['50%'] else old_data[
                                0]['50%']
                        p75 = new_data[
                            '75%'] if new_data['75%'] > old_data[0]['75%'] else old_data[
                                0]['75%']
                        p90 = new_data[
                            '90%'] if new_data['90%'] > old_data[0]['90%'] else old_data[
                                0]['90%']
                        p99 = new_data[
                            '99%'] if new_data['99%'] > old_data[0]['99%'] else old_data[
                                0]['99%']
                        std = new_data['std']
                        old_data = {
                            'mean':
                            (old_data[0]['weight'] + new_data['weight']) /
                            (old_data[0]['count'] + new_data['count']),
                            'max':
                            maximum,
                            'min':
                            minimum,
                            'count':
                            old_data[0]['count'] + new_data['count'],
                            'errors':
                            old_data[0]['errors'] + new_data['errors'],
                            'weight':
                            old_data[0]['weight'] + new_data['weight'],
                            '50%':
                            p50,
                            '75%':
                            p75,
                            '90%':
                            p90,
                            '99%':
                            p99,
                            'std':
                            std,
                        }
                        stm = test_action_aggregate_data.update().values(
                            data=old_data).where(
                                test_action_aggregate_data.c.test_id == test_id
                            ).where(test_action_aggregate_data.c.action_id ==
                                    action_id)
                        try:
                            result = db_connection.execute(stm)
                        except (sqlalchemy.exc.DataError,
                                sqlalchemy.exc.StatementError, TypeError) as e:
                            logger.error("Data: {}".format(old_data))
                            logger.error("Exception {}".format(e))
                    del url_agg_data, df_url
                # overall per-minute series for the whole chunk
                test_overall_data = pd.DataFrame()
                df_gr_by_ts = chunk.groupby(pd.Grouper(freq=freq))
                test_overall_data['avg'] = df_gr_by_ts.response_time.mean()
                test_overall_data[
                    'median'] = df_gr_by_ts.response_time.median()
                test_overall_data['count'] = df_gr_by_ts.response_time.count()
                test_overall_data['test_id'] = test_id
                output_json = json.loads(
                    test_overall_data.to_json(
                        orient='index', date_format='iso'),
                    object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'avg': float(output_json[row]['avg']),
                        'median': float(output_json[row]['median']),
                        'count': float(output_json[row]['count'])
                    }
                    stm = test_data.insert().values(
                        test_id=output_json[row]['test_id'],
                        data=data,
                        data_resolution_id=1,
                        source='default')
                    result = execute_db_stmt(stm, data)
                del test_overall_data, df_gr_by_ts, chunk
                logger.info("Chunk #{} was parsed.".format(parse_task))
            del chunks
        else:
            # small-file path: load the whole results file at once
            df = pd.read_csv(
                jmeter_results_file, index_col=0, low_memory=False)
            df.columns = [
                'response_time', 'url', 'responseCode', 'success',
                'threadName', 'failureMessage', 'grpThreads', 'allThreads'
            ]
            df = df[~df['url'].str.contains('exclude_')]
            #df = df[np.abs(df['response_time']-df['response_time'].mean())<=(3*df['response_time'].std())] #keep only the ones that are within +3 to -3 standard deviations
            df.columns = [
                'response_time', 'url', 'responseCode', 'success',
                'threadName', 'failureMessage', 'grpThreads', 'allThreads'
            ]
            #convert timestamps to normal date/time
            df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
            num_lines = df['response_time'].count()
            logger.info("Number of lines in file: %d." % num_lines)
            unique_urls = df['url'].unique()
            for url in unique_urls:
                if db_session.query(action.c.id).filter(action.c.url == url).\
                        filter(action.c.project_id == project_id).count() == 0:
                    logger.info("Adding new action with URL: {}".format(url))
                    stm = action.insert().values(
                        url=url,
                        project_id=project_id,
                    )
                    result = execute_db_stmt(stm, url)
                action_id = db_session.query(action.c.id).filter(action.c.url == url). \
                    filter(action.c.project_id == project_id).scalar()
                logger.info("Adding data for action: {}".format(url))
                df_url = df[(df.url == url)]
                n = df_url.shape[0]
                freq = '1Min'
                if n > 10 and n < 10000000:
                    logger.info('Size of the data set for action {}:{}'.format(
                        url, n))
                    # Filter outliers (> or < 3 sigmas)
                    df_url = df_url[np.abs(df_url['response_time'] -
                                           df_url['response_time'].mean()) <=
                                    (3 * df_url['response_time'].std())]
                elif n > 30000000:
                    # very large action: coarser resampling bucket
                    freq = '10Min'
                url_data = pd.DataFrame()
                df_url_gr_by_ts = df_url.groupby(pd.Grouper(freq=freq))
                url_data['avg'] = df_url_gr_by_ts.response_time.mean()
                url_data['median'] = df_url_gr_by_ts.response_time.median()
                url_data['count'] = df_url_gr_by_ts.success.count()
                del df_url_gr_by_ts
                df_url_gr_by_ts_only_errors = df_url[(
                    df_url.success == False)].groupby(pd.Grouper(freq=freq))
                url_data[
                    'errors'] = df_url_gr_by_ts_only_errors.success.count()
                url_data['test_id'] = test_id
                url_data['url'] = url
                output_json = json.loads(
                    url_data.to_json(orient='index', date_format='iso'),
                    object_pairs_hook=OrderedDict)
                del url_data
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'avg': output_json[row]['avg'],
                        'median': output_json[row]['median'],
                        'count': output_json[row]['count'],
                        'url': output_json[row]['url'],
                        'errors': output_json[row]['errors'],
                        'test_id': output_json[row]['test_id'],
                    }
                    stm = test_action_data.insert().values(
                        test_id=output_json[row]['test_id'],
                        action_id=action_id,
                        data_resolution_id=1,
                        data=data)
                    result = execute_db_stmt(stm, data)
                url_agg_data = dict(
                    json.loads(df_url['response_time'].describe().to_json()))
                url_agg_data['99%'] = float(df_url['response_time'].quantile(.99))
                url_agg_data['90%'] = float(df_url['response_time'].quantile(.90))
                url_agg_data['weight'] = float(df_url['response_time'].sum())
                url_agg_data['errors'] = int(df_url[(
                    df_url['success'] == False)]['success'].count())
                stm = test_action_aggregate_data.insert().values(
                    test_id=test_id, action_id=action_id, data=url_agg_data)
                result = execute_db_stmt(stm, url_agg_data)
                del url_agg_data, df_url
            # overall per-bucket series for the whole test.
            # NOTE(review): `freq` here is whatever the last action's loop
            # iteration left it as — verify this is intentional.
            test_overall_data = pd.DataFrame()
            df_gr_by_ts = df.groupby(pd.Grouper(freq=freq))
            test_overall_data['avg'] = df_gr_by_ts.response_time.mean()
            test_overall_data['median'] = df_gr_by_ts.response_time.median()
            test_overall_data['count'] = df_gr_by_ts.response_time.count()
            test_overall_data['test_id'] = test_id
            output_json = json.loads(
                test_overall_data.to_json(orient='index', date_format='iso'),
                object_pairs_hook=OrderedDict)
            for row in output_json:
                data = {
                    'timestamp': row,
                    'avg': output_json[row]['avg'],
                    'median': output_json[row]['median'],
                    'count': output_json[row]['count']
                }
                stm = test_data.insert().values(
                    test_id=test_id,
                    data=data,
                    data_resolution_id=1,
                    source='default')
                result = execute_db_stmt(stm, data)
            del test_overall_data, df_gr_by_ts, df
        # results ingested: compress the raw .jtl and delete the original
        zip_results_file(jmeter_results_file)
    file_index += 1
# --- Stage 3: per build, ingest server monitoring data (semicolon-separated
# CSV) and per-request error files, then archive the errors folder. ---
num = 0
GRAPHS = ""
for build_root in build_roots:
    uniqueURL = []
    rownum = 0
    # data_to_analyze[num][1] is the monitoring.data path for this build
    if os.path.isfile(
            data_to_analyze[num][1]) and os.stat(data_to_analyze[num][1]).st_size != 0:
        test_id = db_session.query(
            test.c.id).filter(test.c.path == build_root).scalar()
        # rewrite the file in place, dropping header-ish lines containing
        # 'start'
        f = open(data_to_analyze[num][1], "r")
        lines = f.readlines()
        f.close()
        f = open(data_to_analyze[num][1], "w")
        for line in lines:
            if not ('start' in line):
                f.write(line)
        f.close()
        monitoring_df = pd.read_csv(data_to_analyze[num][1], index_col=1, sep=";")
        monitoring_df.columns = [
            'server_name', 'Memory_used', 'Memory_free', 'Memory_buff',
            'Memory_cached', 'Net_recv', 'Net_send', 'Disk_read', 'Disk_write',
            'System_la1', 'CPU_user', 'CPU_system', 'CPU_iowait'
        ]
        # index column holds epoch seconds
        monitoring_df.index = pd.to_datetime(
            dateconv((monitoring_df.index.values)))
        monitoring_df.index.names = ['timestamp']
        unique_servers = monitoring_df['server_name'].unique()
        for server_ in unique_servers:
            if db_session.query(server.c.id).\
                    filter(server.c.server_name == server_).count() == 0:
                logger.info("Adding new server: {}".format(server_))
                stm = server.insert().values(server_name=server_)
                result = execute_db_stmt(stm, server_)
            server_id = db_session.query(server.c.id).\
                filter(server.c.server_name == server_).scalar()
            # only ingest once per (test, server) pair
            if db_session.query(server_monitoring_data.c.test_id).\
                    filter(server_monitoring_data.c.test_id==test_id).\
                    filter(server_monitoring_data.c.server_id==server_id).count()==0:
                df_server = monitoring_df[(
                    monitoring_df.server_name == server_)]
                output_json = json.loads(
                    df_server.to_json(orient='index', date_format='iso'),
                    object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'Memory_used': output_json[row]['Memory_used'],
                        'Memory_free': output_json[row]['Memory_free'],
                        'Memory_buff': output_json[row]['Memory_buff'],
                        'Memory_cached': output_json[row]['Memory_cached'],
                        'Net_recv': output_json[row]['Net_recv'],
                        'Net_send': output_json[row]['Net_send'],
                        'Disk_read': output_json[row]['Disk_read'],
                        'Disk_write': output_json[row]['Disk_write'],
                        'System_la1': output_json[row]['System_la1'],
                        'CPU_user': output_json[row]['CPU_user'],
                        'CPU_system': output_json[row]['CPU_system'],
                        'CPU_iowait': output_json[row]['CPU_iowait']
                    }
                    stm = server_monitoring_data.insert().values(
                        test_id=test_id,
                        server_id=server_id,
                        data=data,
                        data_resolution_id=1,
                        source='default')
                    result = execute_db_stmt(stm, data)
    else:
        logger.info("Monitoring data is not exist")
    # --- errors ingestion: data_to_analyze[num][2] is the errors folder ---
    errors_zip_dest = build_root + "/errors.zip"
    test_id = db_session.query(
        test.c.id).filter(test.c.path == build_root).scalar()
    if db_session.query(test_error.c.id).filter(
            test_error.c.test_id == test_id).count() == 0:
        logger.info("Errors data is empty for test: {}".format(test_id))
        # restore the errors folder from the archive if needed
        if not os.path.isdir(data_to_analyze[num][2]) or not len(
                os.listdir(data_to_analyze[num][2])) > 0:
            if os.path.exists(errors_zip_dest):
                logger.info("Archive file was found: " + errors_zip_dest)
                with zipfile.ZipFile(errors_zip_dest, "r") as z:
                    z.extractall(build_root + '/errors/')
        if os.path.isdir(
                data_to_analyze[num][2]) and len(os.listdir(data_to_analyze[num][2])) > 0:
            logger.info("Parsing errors data")
            project_id = db_session.query(
                test.c.project_id).filter(test.c.id == test_id).scalar()
            # Iterate through files in errors folder
            for root, dirs, files in os.walk(data_to_analyze[num][2]):
                for file in files:
                    error_file = os.path.join(root, file)
                    try:
                        # file layout: line 0 = action name, line 1 = response
                        # code, lines 2-5 = error text
                        error_text = ""
                        error_code = 0
                        action_name = ""
                        with open(error_file) as fin:
                            error_text = ""
                            for i, line in enumerate(fin):
                                if i == 0:
                                    action_name = line
                                    action_name = re.sub(
                                        '(\r\n|\r|\n)', '', action_name)
                                elif i == 1:
                                    error_code = line
                                    error_code = re.sub(
                                        '(\r\n|\r|\n)', '', error_code)
                                elif i > 1 and i < 6:  # take first 4 line of error
                                    error_text += line
                        # normalise the error text so equivalent errors
                        # dedupe: digits -> 'N', whitespace/newlines -> '_'
                        error_text = re.sub('\d', 'N', error_text)
                        error_text = re.sub('(\r\n|\r|\n)', '_', error_text)
                        error_text = re.sub('\s', '_', error_text)
                        if db_session.query(action.c.id).filter(action.c.url == action_name).\
                                filter(action.c.project_id == project_id).count() > 0:
                            action_id = db_session.query(
                                action.c.id
                            ).filter(action.c.url == action_name).filter(
                                action.c.project_id == project_id).scalar()
                            if db_session.query(error.c.id).filter(
                                    error.c.text == error_text).count() == 0:
                                logger.info(
                                    "Adding new error: {}".format(error_text))
                                stm = error.insert().values(
                                    text=error_text, code=error_code)
                                result = execute_db_stmt(stm, error_code)
                            error_id = db_session.query(error.c.id).filter(
                                error.c.text == error_text).scalar()
                            # insert or bump the per-(test, action, error)
                            # occurrence counter
                            if db_session.query(test_error.c.id).filter(
                                    test_error.c.error_id == error_id
                            ).filter(test_error.c.test_id == test_id).filter(
                                    test_error.c.action_id ==
                                    action_id).count() == 0:
                                stm = test_error.insert().values(
                                    test_id=test_id,
                                    error_id=error_id,
                                    action_id=action_id,
                                    count=1)
                                result = execute_db_stmt(stm, action_id)
                            else:
                                prev_count = db_session.query(
                                    test_error.c.count).filter(
                                        test_error.c.error_id == error_id
                                    ).filter(test_error.c.test_id == test_id
                                             ).filter(test_error.c.action_id ==
                                                      action_id).scalar()
                                stm = test_error.update(
                                ).values(count=prev_count + 1).where(
                                    test_error.c.error_id == error_id
                                ).where(test_error.c.test_id == test_id).where(
                                    test_error.c.action_id == action_id)
                                result = execute_db_stmt(stm, action_id)
                    except ValueError:
                        logger.error("Cannot parse error file for: ")
            # pack the parsed errors folder back into the archive and remove it
            zip_dir(data_to_analyze[num][2], errors_zip_dest)
            try:
                if 'errors' in data_to_analyze[num][2]:
                    shutil.rmtree(data_to_analyze[num][2])
            except OSError:
                logger.error('OSError')
            logger.info("Errors folder was packed and removed")
    num += 1
#stmt = select([test.c.id, test.c.path])
# query_result = db_engine.execute(stmt)
#
# logger.info("Cleanup obsolete test results")
# for q in query_result:
# test_id = q.id
# test_path = q.path
# logger.info("Check data in directory: {}".format(test_path))
# if not os.path.exists(q.path):
# logger.info("Deleting test_id: {} path: {}".format(
# str(test_id), test_path))
# stm2 = server_monitoring_data.delete().where(
# server_monitoring_data.c.test_id == test_id)
# stm3 = test_action_data.delete().where(
# test_action_data.c.test_id == test_id)
# stm4 = test_data.delete().where(test_data.c.test_id == test_id)
# stm5 = test_action_aggregate_data.delete().where(
# test_action_aggregate_data.c.test_id == test_id)
# stm6 = test_error.delete().where(test_error.c.test_id == test_id)
# stm7 = test.delete().where(test.c.id == test_id)
#
# result2 = db_connection.execute(stm2)
# result3 = db_connection.execute(stm3)
# result4 = db_connection.execute(stm4)
# result5 = db_connection.execute(stm5)
# result6 = db_connection.execute(stm6)
# result7 = db_connection.execute(stm7)
| mit |
quimaguirre/diana | scripts/compare_profiles.py | 1 | 35855 | import argparse
import configparser
import copy
import ntpath
import numpy as np
import pandas as pd
import time
import sys, os, re
from context import diana
import diana.classes.drug as diana_drug
import diana.classes.comparison as comparison
import diana.classes.network_analysis as network_analysis
import diana.classes.functional_analysis as functional_analysis
import diana.classes.top_scoring as top_scoring
import diana.toolbox.wrappers as wrappers
def main():
    """
    Entry point: compare the profiles of two drugs (parses CLI options and
    delegates to compare_profiles).
    Optimized for Python 3.
    python /home/quim/PHD/Projects/DIANA/diana/scripts/compare_profiles.py -d1 DB11699 -d2 DB00177 -sif /home/quim/PHD/Projects/DIANA/diana/data/network_cheng.sif
    """
    options = parse_user_arguments()
    compare_profiles(options)
def parse_user_arguments(*args, **kwds):
    """
    Parses the arguments of the program

    Returns an argparse.Namespace with: job_id_drug1, job_id_drug2, sif,
    threshold_list, workspace.
    NOTE(review): *args and **kwds are accepted but never used — the parser
    always reads sys.argv.
    """
    parser = argparse.ArgumentParser(
        description = "Compare the profiles of the input drugs",
        epilog = "@oliva's lab 2017")
    parser.add_argument('-j1','--job_id_drug1',dest='job_id_drug1',action = 'store',
                        help = """ Identifier of the drug number 1. 
                        If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between
                        double quotes. """)
    parser.add_argument('-j2','--job_id_drug2',dest='job_id_drug2',action = 'store',
                        help = """ Identifier of the drug number 2. 
                        If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between
                        double quotes. """)
    parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
                        help = 'Input file with a protein-protein interaction network in SIF format.')
    parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
                        help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
                        - Different numbers that will be the threshold values separated by newline characters. 
                        For example, a file called "top_threshold.list" containing:
                        0.1
                        0.5
                        1
                        5
                        10
                        """)
    parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
                        help = """Define the workspace directory where the data directory and the results directory will be created""")
    options=parser.parse_args()
    return options
#################
#################
# MAIN FUNCTION #
#################
#################
def compare_profiles(options):
    """
    Compares the profiles of two input drugs.

    Reads the profiles previously generated for the drugs identified by
    ``options.job_id_drug1`` / ``options.job_id_drug2`` (targets, PFAMs,
    target functional enrichment, GUILD node/edge/functional profiles,
    SMILES structures, ATCs and side effects), compares them feature by
    feature, computes the network proximity between the two target sets,
    and writes a results table under ``<workspace>/comparisons``.

    Parameters
    ----------
    options : argparse.Namespace
        Parsed command-line options (job_id_drug1, job_id_drug2, sif,
        threshold_list, workspace).
    """
    # Start marker for time measure
    start = time.time()
    print("\n\t\t------------------------------------------------------------------------------------------------------------------------\n")
    print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Second part: Comparison of drug profiles\n")
    print("\t\t------------------------------------------------------------------------------------------------------------------------\n")
    # Get the script path and define directories used
    main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    scripts_dir = os.path.join(main_path, 'scripts')
    mappings_dir = os.path.join(main_path, 'mappings')
    data_dir = os.path.join(main_path, 'data')
    workspace_dir = options.workspace
    create_directory(workspace_dir)
    # Profiles must already exist here (created by the first part of DIANA).
    profiles_dir = os.path.join(workspace_dir, 'profiles')
    # Create a directory for the results of the comparison
    results_dir = os.path.join(workspace_dir, "comparisons")
    create_directory(results_dir)
    # Create directories for additional data
    other_data_dir = os.path.join(workspace_dir, 'additional_data')
    create_directory(other_data_dir)
    random_networks_dir = os.path.join(other_data_dir, 'random_networks')
    create_directory(random_networks_dir)
    associations_dir = os.path.join(other_data_dir, 'gene_function_associations')
    create_directory(associations_dir)
    target_associations_dir = os.path.join(associations_dir, 'targets')
    numbers_dir = os.path.join(other_data_dir, 'numbers')
    create_directory(numbers_dir)
    # Create a ComparisonResult instance (accumulates all per-feature summaries)
    comparison_instance = comparison.ComparisonResult()
    #--------------------------------------#
    #   GET INFORMATION FROM CONFIG FILE   #
    #--------------------------------------#
    # Read the config file
    config_file = os.path.join(main_path, 'config.ini')
    config = configparser.ConfigParser()
    config.read(config_file)
    #--------------------#
    #   SIF CONTROLLER   #
    #--------------------#
    # SIF CONTROLLER: Checks the network in SIF format provided by the user.
    # Check if the network file is provided
    if options.sif and fileExist(options.sif):
        # Get the network name
        network_filename = ntpath.basename(options.sif)
        # Associations specific to this network are stored in a per-network dir.
        network_associations_dir = os.path.join(associations_dir, network_filename)
    else:
        # If not, we output an error
        print(' DIANA INFO:\tThe network SIF file is missing. Please, introduce the parameter -sif.\n\t\tIf you do not have a network, use one of the networks in the sif folder.\n')
        sys.exit(10)
    #------------------------------#
    #   CREATE/READ NUMBERS FILE   #
    #------------------------------#
    # Define parameters for the functional enrichment
    type_functions = ['gobp', 'gomf', 'reactome']
    type_corrections = ['fdr_bh', 'bonferroni']
    # Numbers file associated to all targets.
    # It caches the universe sizes (total targets, PFAMs, functions, ATCs, SEs)
    # used as denominators by the comparison metrics; created once, then reused.
    target_numbers_file = os.path.join(numbers_dir, 'target_numbers.txt')
    if not fileExist(target_numbers_file):
        with open(target_numbers_file, 'w') as num_fd:
            num_fd.write('#feature\tnumber\n')
            # Get targets
            drugbank_geneid_mapping_file = os.path.join(mappings_dir, 'drugbank_geneid_drug_target_interactions.txt')
            targets = diana_drug.get_all_targets_from_mappings(drugbank_geneid_mapping_file)
            num_fd.write('target\t{}\n'.format(len(targets)))
            # Get PFAMs
            geneid_target_mapping_file = os.path.join(mappings_dir, 'geneid_target_mappings.txt')
            pfams = diana_drug.get_all_pfams_from_mappings(geneid_target_mapping_file)
            num_fd.write('pfam\t{}\n'.format(len(pfams)))
            # Get functions
            for type_function in type_functions:
                associations_file = os.path.join(target_associations_dir, '{}_to_gene.txt'.format(type_function))
                functions = functional_analysis.get_functions_from_associations_file(associations_file)
                num_fd.write('{}\t{}\n'.format(type_function, len(functions)))
            # Get ATCs
            drugbank_atc_file = os.path.join(mappings_dir, 'drugbank_drug_atc.txt')
            level_to_ATCs = diana_drug.get_all_atcs_from_mappings(drugbank_atc_file)
            for level in ['level1', 'level2', 'level3', 'level4', 'level5']:
                ATCs = set(level_to_ATCs[level])
                num_fd.write('atc-{}\t{}\n'.format(level, len(ATCs)))
            # Get SEs
            drugbank_side_effects_file = os.path.join(mappings_dir, 'drugbank_drug_side_effects.txt')
            ses = diana_drug.get_all_ses_from_mappings(drugbank_side_effects_file)
            num_fd.write('se\t{}\n'.format(len(ses)))
    target_numbers_df = pd.read_csv(target_numbers_file, sep='\t', index_col=None)
    # Numbers file associated to network (node/edge counts and function totals)
    network_numbers_file = os.path.join(numbers_dir, '{}_numbers.txt'.format(network_filename))
    if not fileExist(network_numbers_file):
        with open(network_numbers_file, 'w') as num_fd:
            num_fd.write('#feature\tnumber\n')
            # We create a Network instance
            network_instance = network_analysis.Network(network_file=options.sif, type_id='geneid', network_format='sif')
            # We keep the number of nodes
            num_fd.write('node\t{}\n'.format(network_instance.network.number_of_nodes()))
            num_fd.write('edge\t{}\n'.format(network_instance.network.number_of_edges()))
            # Get functions
            for type_function in type_functions:
                associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
                functions = functional_analysis.get_functions_from_associations_file(associations_file)
                num_fd.write('{}\t{}\n'.format(type_function, len(functions)))
    network_numbers_df = pd.read_csv(network_numbers_file, sep='\t', index_col=None)
    #-------------------#
    #   READ PROFILES   #
    #-------------------#
    print(' DIANA INFO:\tREADING PROFILES\n')
    # Get the list of thresholds to create the profiles
    if options.threshold_list and fileExist(options.threshold_list):
        threshold_list = get_values_from_threshold_file(options.threshold_list)
    else:
        # Default cut-offs: top 1%, 2%, 5% plus the adaptive 'functions' cut-off.
        threshold_list = [1, 2, 5, 'functions']
    print(' DIANA INFO:\tList of percentages used to define the drug profiles: {}\n'.format(', '.join([str(x) for x in threshold_list])))
    # Check if the directories of the drugs exist
    if options.job_id_drug1:
        drug_dir1 = os.path.join(profiles_dir, options.job_id_drug1)
        check_directory(drug_dir1)
    else:
        print(' DIANA INFO:\tjob_id_drug1 parameter is missing. Please, introduce the parameter -j1 with the job identifier of the drug.\n')
        sys.exit(10)
    if options.job_id_drug2:
        drug_dir2 = os.path.join(profiles_dir, options.job_id_drug2)
        check_directory(drug_dir2)
    else:
        print(' DIANA INFO:\tjob_id_drug2 parameter is missing. Please, introduce the parameter -j2 with the job identifier of the drug.\n')
        sys.exit(10)
    # Read profiles for drug 1
    drug_instance1, guild_profile_instance1, scored_network_instance1, target_function_results1, guild_results1 = read_drug_profiles(drug_dir=drug_dir1, mappings_dir=mappings_dir, target_associations_dir=target_associations_dir, network_associations_dir=network_associations_dir, threshold_list=threshold_list)
    # Read profiles for drug 2
    drug_instance2, guild_profile_instance2, scored_network_instance2, target_function_results2, guild_results2 = read_drug_profiles(drug_dir=drug_dir2, mappings_dir=mappings_dir, target_associations_dir=target_associations_dir, network_associations_dir=network_associations_dir, threshold_list=threshold_list)
    #----------------------#
    #   COMPARE PROFILES   #
    #----------------------#
    # Compare targets
    targets_dict1 = comparison.generate_targets_dict_for_comparison(drug_instance1.targets)
    targets_dict2 = comparison.generate_targets_dict_for_comparison(drug_instance2.targets)
    # NOTE(review): int() on a single-row pandas selection relies on the
    # deprecated Series.__int__ — consider .iloc[0] when upgrading pandas.
    num_targets = int(target_numbers_df[target_numbers_df['#feature'] == 'target']['number'])
    summary_targets = comparison.calculate_comparison(targets_dict1, targets_dict2, num_targets)
    comparison_instance.add_target_result('target', summary_targets)
    print(summary_targets)
    # Compare PFAMs
    pfams_dict1 = comparison.generate_targets_dict_for_comparison(drug_instance1.pfams)
    pfams_dict2 = comparison.generate_targets_dict_for_comparison(drug_instance2.pfams)
    num_pfams = int(target_numbers_df[target_numbers_df['#feature'] == 'pfam']['number'])
    summary_pfams = comparison.calculate_comparison(pfams_dict1, pfams_dict2, num_pfams)
    comparison_instance.add_target_result('pfam', summary_pfams)
    print(summary_pfams)
    # Compare functional profiles (targets-based enrichment)
    for type_function in type_functions:
        num_target_functions = int(target_numbers_df[target_numbers_df['#feature'] == type_function]['number'])
        for type_correction in type_corrections:
            targets_functions_instance1 = target_function_results1['{}-{}'.format(type_function, type_correction)]
            targets_functions_instance2 = target_function_results2['{}-{}'.format(type_function, type_correction)]
            summary_target_functions = comparison.calculate_comparison(targets_functions_instance1.term_id_to_values, targets_functions_instance2.term_id_to_values, num_target_functions)
            comparison_instance.add_target_result('{}-{}'.format(type_function, type_correction), summary_target_functions)
            print('Target: {} - {}'.format(type_function, type_correction))
            print(summary_target_functions)
    # Compare GUILD profiles
    num_nodes = int(network_numbers_df[network_numbers_df['#feature'] == 'node']['number'])
    num_edges = int(network_numbers_df[network_numbers_df['#feature'] == 'edge']['number'])
    for top_threshold in threshold_list:
        if top_threshold == 'functions':
            # Adaptive threshold: one profile per function type / correction.
            for type_function in type_functions:
                num_network_functions = int(network_numbers_df[network_numbers_df['#feature'] == type_function]['number'])
                for type_correction in type_corrections:
                    # Compare node profiles
                    print('NODE PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
                    node_profile1_values = copy.copy(guild_results1['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)].node_to_score)
                    node_profile2_values = copy.copy(guild_results2['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)].node_to_score)
                    guild_profile1_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance1.node_to_score)
                    guild_profile2_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance2.node_to_score)
                    summary_nodes = comparison.calculate_comparison_top_scoring(node_profile1_values, guild_profile1_values, node_profile2_values, guild_profile2_values, num_nodes)
                    comparison_instance.add_guild_result('node', top_threshold, summary_nodes)
                    print(summary_nodes)
                    # Compare edge profiles
                    print('EDGE PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
                    edge_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)].edge_to_score)
                    edge_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)].edge_to_score)
                    scored_network1_values = comparison.generate_guild_dict_for_comparison(scored_network_instance1.edge_to_score)
                    scored_network2_values = comparison.generate_guild_dict_for_comparison(scored_network_instance2.edge_to_score)
                    summary_edges = comparison.calculate_comparison_top_scoring(edge_profile1_values, scored_network1_values, edge_profile2_values, scored_network2_values, num_edges)
                    comparison_instance.add_guild_result('edge', top_threshold, summary_edges)
                    print(summary_edges)
                    # Compare functional profiles
                    print('FUNCTIONAL PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
                    functional_profile1_values = copy.copy(guild_results1['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
                    functional_profile2_values = copy.copy(guild_results2['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
                    summary_functions = comparison.calculate_comparison(functional_profile1_values, functional_profile2_values, num_network_functions)
                    comparison_instance.add_guild_result('{}-{}'.format(type_function, type_correction), top_threshold, summary_functions)
                    print(summary_functions)
        else:
            # Percentage threshold: one node/edge profile per cut-off.
            # Compare node profiles
            print('NODE PROFILES, THRESHOLD: {}'.format(top_threshold))
            node_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['node-{}'.format(top_threshold)].node_to_score)
            node_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['node-{}'.format(top_threshold)].node_to_score)
            guild_profile1_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance1.node_to_score)
            guild_profile2_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance2.node_to_score)
            summary_nodes = comparison.calculate_comparison_top_scoring(node_profile1_values, guild_profile1_values, node_profile2_values, guild_profile2_values, num_nodes)
            comparison_instance.add_guild_result('node', top_threshold, summary_nodes)
            print(summary_nodes)
            # Compare edge profiles
            print('EDGE PROFILES, THRESHOLD: {}'.format(top_threshold))
            edge_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['edge-{}'.format(top_threshold)].edge_to_score)
            edge_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['edge-{}'.format(top_threshold)].edge_to_score)
            scored_network1_values = comparison.generate_guild_dict_for_comparison(scored_network_instance1.edge_to_score)
            scored_network2_values = comparison.generate_guild_dict_for_comparison(scored_network_instance2.edge_to_score)
            summary_edges = comparison.calculate_comparison_top_scoring(edge_profile1_values, scored_network1_values, edge_profile2_values, scored_network2_values, num_edges)
            comparison_instance.add_guild_result('edge', top_threshold, summary_edges)
            print(summary_edges)
            for type_function in type_functions:
                num_network_functions = int(network_numbers_df[network_numbers_df['#feature'] == type_function]['number'])
                for type_correction in type_corrections:
                    # Compare functional profiles
                    print('FUNCTIONAL PROFILES, THRESHOLD: {}'.format(top_threshold))
                    functional_profile1_values = copy.copy(guild_results1['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
                    functional_profile2_values = copy.copy(guild_results2['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
                    summary_functions = comparison.calculate_comparison(functional_profile1_values, functional_profile2_values, num_network_functions)
                    comparison_instance.add_guild_result('{}-{}'.format(type_function, type_correction), top_threshold, summary_functions)
                    print(summary_functions)
    # Compare structures: mean pairwise Tanimoto similarity over all SMILES pairs
    similarity_results = []
    if len(drug_instance1.smiles) > 0 and len(drug_instance2.smiles) > 0:
        for smiles1 in drug_instance1.smiles:
            for smiles2 in drug_instance2.smiles:
                similarity_result = comparison.get_smiles_similarity_indigo(smiles1, smiles2, fp_type = "sub", metric = "tanimoto")
                if similarity_result:
                    similarity_results.append(similarity_result)
        if len(similarity_results) > 0:
            similarity_results = np.mean(similarity_results)
            comparison_instance.structure_result = similarity_results
            print(' DIANA INFO:\tStructural similarity between the two drugs: {:.3f}\n'.format(similarity_results))
        else:
            similarity_results = None
            print(' DIANA INFO:\tStructural similarity unavailable\n')
    else:
        similarity_results = None
        print(' DIANA INFO:\tThe SMILES of the drugs are missing! It is not possible to compute the structural similarity\n')
    # Compare ATC profiles (one comparison per ATC hierarchy level)
    for level in ['level1', 'level2', 'level3', 'level4', 'level5']:
        num_atcs = int(target_numbers_df[target_numbers_df['#feature'] == 'atc-{}'.format(level)]['number'])
        ATCs1 = drug_instance1.level_to_ATCs[level]
        ATCs2 = drug_instance2.level_to_ATCs[level]
        ATCs1_dict = comparison.generate_targets_dict_for_comparison(ATCs1)
        ATCs2_dict = comparison.generate_targets_dict_for_comparison(ATCs2)
        summary_ATCs = comparison.calculate_comparison(ATCs1_dict, ATCs2_dict, num_atcs)
        comparison_instance.atc_results[level] = summary_ATCs
        print('ATC comparison: {}'.format(level))
        print(summary_ATCs)
    # Compare SE (side effect) profiles
    num_ses = int(target_numbers_df[target_numbers_df['#feature'] == 'se']['number'])
    SEs1_dict = comparison.generate_targets_dict_for_comparison(drug_instance1.SEs)
    SEs2_dict = comparison.generate_targets_dict_for_comparison(drug_instance2.SEs)
    summary_SEs = comparison.calculate_comparison(SEs1_dict, SEs2_dict, num_ses)
    comparison_instance.se_result = summary_SEs
    print(summary_SEs)
    # Calculate network proximity between the two target sets
    # (d = raw distance, z = z-score vs. degree-matched random sets)
    network = wrappers.get_network(options.sif, only_lcc = True)
    nodes_from = drug_instance1.targets_in_network
    nodes_to = drug_instance2.targets_in_network
    d, z, (mean, sd) = wrappers.calculate_proximity(network, nodes_from, nodes_to, min_bin_size = 2)
    print(d, z, (mean, sd))
    #-------------------#
    #   WRITE RESULTS   #
    #-------------------#
    # Write the results table
    comparison_id = '{}_vs_{}'.format(options.job_id_drug1, options.job_id_drug2)
    results_table = os.path.join(results_dir, '{}.tsv'.format(comparison_id))
    comparison_instance.output_results_table(results_table, threshold_list)
    # End marker for time
    end = time.time()
    print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
    return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
    """Return True when *file* names an existing regular file (not a directory)."""
    path_is_present = os.path.exists(file)
    return path_is_present and os.path.isfile(file)
def create_directory(directory):
    """
    Create *directory* if it does not already exist.

    Improvements over the original os.stat/bare-except probe:
    - os.makedirs with exist_ok=True is race-free (no TOCTOU window between
      the existence check and the creation) and creates missing parent
      directories as well.
    - No bare ``except`` swallowing unrelated errors (e.g. KeyboardInterrupt).
    Like the original, an existing path (even a regular file) is left alone.
    """
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
    return
def check_file(file):
    """
    Verify that *file* exists as a regular file.

    Raises
    ------
    FileNotFound
        If the path is missing or is not a regular file.
    """
    if fileExist(file):
        return
    raise FileNotFound(file)
def check_directory(directory):
    """
    Checks if a directory exists and if not, raises DirNotFound exception.

    BUG FIX: the original used a bare ``except``, which would also swallow
    KeyboardInterrupt/SystemExit and misreport them as a missing directory.
    Only OSError (what os.stat raises for a missing/inaccessible path) is
    caught now, and the cause is chained for easier debugging.
    """
    try:
        os.stat(directory)
    except OSError as err:
        raise DirNotFound(directory) from err
def get_targets_in_sif_file(sif_file, targets):
    """
    Get the targets that are inside the network given by the user.

    Parameters
    ----------
    sif_file : str
        Path to a SIF network file with tab-separated ``node1 score node2`` lines.
    targets : iterable
        Target identifiers; compared as strings against the node columns.

    Returns
    -------
    list of str
        The (stringified) targets found as nodes in the network, unordered
        and without duplicates.
    """
    targets_in_network = set()
    # PERF FIX: use a set for membership tests — the original list made each
    # `in` check O(len(targets)) per network line.
    str_tar = {str(x) for x in targets}
    with open(sif_file, 'r') as sif_fd:
        for line in sif_fd:
            node1, score, node2 = line.strip().split('\t')
            if node1 in str_tar:
                targets_in_network.add(node1)
            if node2 in str_tar:
                targets_in_network.add(node2)
    return list(targets_in_network)
def read_parameters_file(parameters_file):
    """
    Read the parameters file of a drug profile.

    The first line is a header and is discarded; the second line is split on
    tabs and its fields are returned as a list of strings.
    """
    with open(parameters_file, 'r') as parameters_fd:
        parameters_fd.readline()              # skip the header line
        data_line = parameters_fd.readline()
    return data_line.strip().split('\t')
def read_drug_profiles(drug_dir, mappings_dir, target_associations_dir, network_associations_dir, threshold_list=None):
    """
    Read the profiles of a drug.

    Loads (or regenerates when missing) all profiles previously computed for
    a drug: targets, PFAMs, target functional enrichment, GUILD node/edge/
    functional profiles for every threshold, SMILES, ATCs and side effects.

    Parameters
    ----------
    drug_dir : str
        Directory holding the drug's generated profiles.
    mappings_dir : str
        Directory with the DrugBank/GeneID mapping tables.
    target_associations_dir : str
        Directory with gene-function association files for targets.
    network_associations_dir : str
        Directory with gene-function association files for the network.
    threshold_list : list or None
        Cut-offs defining the GUILD profiles; defaults to [1, 2, 5, 'functions'].

    Returns
    -------
    tuple
        (drug_instance, guild_profile_instance, scored_network_instance,
        target_function_results, guild_results)
    """
    # BUG FIX: avoid the mutable default argument; resolve the default here.
    if threshold_list is None:
        threshold_list = [1, 2, 5, 'functions']
    # Check/Read parameters file
    output_parameters_file = os.path.join(drug_dir, 'parameters.txt')
    check_file(output_parameters_file)
    parameters = read_parameters_file(output_parameters_file)
    drugname = parameters[1]
    # Create drug instance
    drug_instance = diana_drug.Drug(drugname)
    # Read target profile
    target_dir = os.path.join(drug_dir, 'target_profiles')
    target_file = os.path.join(target_dir, '{}_targets.txt'.format(drugname))
    check_file(target_file)
    drug_instance.obtain_targets_from_file(target_file, target_type_id='geneid')
    print(' DIANA INFO:\tTARGETS OF {}: {}\n'.format(drugname, ', '.join(drug_instance.targets)))
    # Read PFAM profile
    pfam_file = os.path.join(target_dir, 'pfam_profile.txt')
    if fileExist(pfam_file):
        drug_instance.obtain_pfams_from_file(pfam_file)
    else:
        # Obtain the PFAMs from a table
        target_mapping_file = os.path.join(mappings_dir, 'geneid_target_mappings.txt')
        drug_instance.obtain_pfams_from_geneid_target_table(drug_instance.targets, target_mapping_file)
    # Read target-functional profiles
    type_functions = ['gobp', 'gomf', 'reactome']
    type_corrections = ['fdr_bh', 'bonferroni']
    target_function_results = {}
    for type_function in type_functions:
        associations_file = os.path.join(target_associations_dir, '{}_to_gene.txt'.format(type_function))
        for type_correction in type_corrections:
            targets_functional_file = os.path.join(target_dir, 'targets_functional_profile_{}_{}.txt'.format(type_function, type_correction))
            # BUG FIX: the original never assigned targets_functions_instance
            # after regenerating a missing file, causing a NameError (or a
            # silent stale binding from the previous loop iteration). Generate
            # the file first when needed, then always load it.
            if not fileExist(targets_functional_file):
                top_scoring.functional_top_scoring(top_geneids=drug_instance.targets, type_correction=type_correction, associations_file=associations_file, output_file=targets_functional_file)
            targets_functions_instance = network_analysis.FunctionalProfile(targets_functional_file, 'targets', 'targets')
            target_function_results['{}-{}'.format(type_function, type_correction)] = targets_functions_instance
    # Read GUILD node scores
    guild_dir = os.path.join(drug_dir, 'guild_profiles')
    scores_file = os.path.join(guild_dir, 'output_scores.sif.netcombo')
    check_file(scores_file)
    guild_profile_instance = network_analysis.GUILDProfile(scores_file, type_id='geneid', top=100, top_type='percentage')
    drug_instance.targets_in_network = set([target for target in drug_instance.targets if target in guild_profile_instance.node_to_score.keys()])
    # Read GUILD edge scores
    scored_network_file = os.path.join(guild_dir, 'network_scored.txt')
    check_file(scored_network_file)
    scored_network_instance = network_analysis.EdgeProfile(network_file=scored_network_file, type_id='geneid', network_format='sif', top=100)
    # Read GUILD profiles
    guild_results = {}
    for top_threshold in threshold_list:
        if top_threshold == 'functions':
            # Read profiles associated to a functions threshold
            for type_function in type_functions:
                associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
                for type_correction in type_corrections:
                    # Obtain cut-off
                    output_sliding_window_file = os.path.join(guild_dir, 'sliding_window_{}_{}.txt'.format(type_function, type_correction))
                    if fileExist(output_sliding_window_file):
                        cutoff_central_position, cutoff_right_interval = functional_analysis.read_sliding_window_file(output_sliding_window_file=output_sliding_window_file, num_seeds=len(drug_instance.targets_in_network))
                    else:
                        cutoff_central_position, cutoff_right_interval = functional_analysis.calculate_functions_threshold(seed_geneids=drug_instance.targets_in_network, geneid_to_score=guild_profile_instance.node_to_score, type_correction=type_correction, associations_file=associations_file, output_sliding_window_file=output_sliding_window_file, output_seeds_enrichment_file=None, seed_functional_enrichment=False)
                    print('{} - {} - {} - {}: Cut-off central position: {}. Cut-off right interval position: {}'.format(drug_instance.drug_name, top_threshold, type_function, type_correction, cutoff_central_position, cutoff_right_interval))
                    # Obtain node profile
                    node_file = os.path.join(guild_dir, 'node_profile_top_{}_{}_{}_{}.txt'.format('functions', type_function, type_correction, guild_profile_instance.type_id))
                    if fileExist(node_file):
                        node_profile_instance = network_analysis.GUILDProfile(scores_file=node_file, type_id=guild_profile_instance.type_id, top=cutoff_right_interval, top_type='number_of_nodes')
                    else:
                        node_profile_instance = guild_profile_instance.create_node_profile(threshold=cutoff_right_interval, threshold_type='number_of_nodes', output_file=node_file)
                    guild_results['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = node_profile_instance
                    # Obtain edge profile
                    edge_file = os.path.join(guild_dir, 'edge_profile_top_{}_{}_{}_{}.txt'.format('functions', type_function, type_correction, guild_profile_instance.type_id))
                    if fileExist(edge_file):
                        edge_profile_instance = network_analysis.EdgeProfile(network_file=edge_file, type_id=guild_profile_instance.type_id, network_format='sif', top=cutoff_right_interval, top_type='number_of_nodes')
                    else:
                        edge_profile_instance = scored_network_instance.create_edge_profile(node_to_score=guild_profile_instance.node_to_score, threshold=cutoff_right_interval, threshold_type='number_of_nodes', output_file=edge_file)
                    guild_results['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = edge_profile_instance
                    # Obtain functional profile
                    function_file = os.path.join(guild_dir, 'functional_profile_top_{}_{}_{}.txt'.format('functions', type_function, type_correction))
                    if fileExist(function_file):
                        functional_profile_instance = network_analysis.FunctionalProfile(functional_file=function_file, top=cutoff_right_interval, node_file=node_file)
                    else:
                        functional_profile_instance = node_profile_instance.create_functional_profile(type_correction=type_correction, output_file=function_file, associations_file=associations_file)
                    guild_results['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = functional_profile_instance
        else:
            # Obtain node profile
            node_file = os.path.join(guild_dir, 'node_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))
            check_file(node_file)
            node_profile_instance = network_analysis.GUILDProfile(node_file, type_id=guild_profile_instance.type_id, top=top_threshold, top_type='percentage')
            guild_results['node-{}'.format(top_threshold)] = node_profile_instance
            # Obtain edge profiles
            edge_file = os.path.join(guild_dir, 'edge_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))
            check_file(edge_file)
            edge_profile_instance = network_analysis.EdgeProfile(network_file=edge_file, type_id=guild_profile_instance.type_id, network_format='sif', top=top_threshold, top_type='percentage')
            guild_results['edge-{}'.format(top_threshold)] = edge_profile_instance
            # Obtain functional profiles
            for type_function in type_functions:
                # BUG FIX: the original reused a stale `associations_file`
                # left over from an earlier loop (target associations, or the
                # previous 'functions' iteration); network-based functional
                # profiles must use the network associations directory.
                associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
                for type_correction in type_corrections:
                    functional_file = os.path.join(guild_dir, 'functional_profile_top_{}_{}_{}.txt'.format(str(top_threshold), type_function, type_correction))
                    if fileExist(functional_file):
                        functional_profile_instance = network_analysis.FunctionalProfile(functional_file=functional_file, top=top_threshold, node_file=node_file)
                    else:
                        functional_profile_instance = node_profile_instance.create_functional_profile(type_correction=type_correction, output_file=functional_file, associations_file=associations_file)
                    guild_results['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = functional_profile_instance
    # Read structures
    structure_file = os.path.join(drug_dir, 'structure_profiles/structure_profile.txt')
    if fileExist(structure_file):
        drug_instance.obtain_SMILES_from_file(structure_file)
    else:
        # Obtain SMILES from a table
        drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
        drugbank_smiles_file = os.path.join(mappings_dir, 'drugbank_drug_smiles.txt')
        # First, translate the drug input name to drugbankid (if necessary)
        drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
        # Search SMILES in the table
        drug_instance.obtain_SMILES_from_table(drugbankids, drugbank_smiles_file)
    # Read ATCs
    atc_file = os.path.join(drug_dir, 'atc_profiles/ATC_profile.txt')
    if fileExist(atc_file):
        drug_instance.obtain_ATCs_from_file(atc_file)
    else:
        # Obtain ATCs from a table
        drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
        drugbank_atc_file = os.path.join(mappings_dir, 'drugbank_drug_atc.txt')
        # First, translate the drug input name to drugbankid (if necessary)
        drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
        # Search ATCs in the table
        drug_instance.obtain_ATCs_from_table(drugbankids, drugbank_atc_file)
    # Read side effects
    se_file = os.path.join(drug_dir, 'se_profiles/SE_profile.txt')
    if fileExist(se_file):
        drug_instance.obtain_SE_from_file(se_file)
    else:
        # Obtain side effects from a table
        # BUG FIX: drug_mapping_file was previously only defined in the
        # structure/ATC fallback branches; if those files existed but the SE
        # file did not, this branch raised a NameError. Define it here too.
        drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
        drugbank_side_effects_file = os.path.join(mappings_dir, 'drugbank_drug_side_effects.txt')
        # First, translate the drug input name to drugbankid (if necessary)
        drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
        # Search side effects in the table
        drug_instance.obtain_SE_from_table(drugbankids, drugbank_side_effects_file)
    return drug_instance, guild_profile_instance, scored_network_instance, target_function_results, guild_results
class FileNotFound(Exception):
    """Raised when a required profile file cannot be found on disk."""

    def __init__(self, file):
        # Keep the attribute name: callers may inspect exc.file.
        self.file = file

    def __str__(self):
        message = 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'
        return message.format(self.file)
class DirNotFound(Exception):
    """Raised when an expected profile/workspace directory is missing."""

    def __init__(self, directory):
        # Keep the attribute name: callers may inspect exc.directory.
        self.directory = directory

    def __str__(self):
        message = 'The directory {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the parameters have been correctly introduced and the profiles have been correctly generated.\n'
        return message.format(self.directory)
if __name__ == "__main__":
    # Run the comparison only when executed as a script (not on import).
    main()
| mit |
martin-hunt/foobar | docs/conf.py | 1 | 8160 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
# Absolute directory of this conf.py (cwd-joined so a relative __file__ works).
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# Auto-generate the API reference (.rst stubs) for the `foobar` package each
# time the docs are built.
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../foobar")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
# NOTE(review): passing the program name ("sphinx-apidoc") as argv[0] to
# sphinx.apidoc.main() only works on Sphinx < 1.7; newer versions moved the
# module to sphinx.ext.apidoc and expect the argument list without the
# leading program name — confirm the pinned Sphinx version.
apidoc.main(cmd_line.split(" "))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'foobar'
copyright = u'2014, Martin Hunt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Pull the real release number from the installed package when available;
# if `foobar` is not importable the placeholder values set above are kept.
try:
    from foobar import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'foobar-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'foobar Documentation',
u'Martin Hunt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# Inter-project links for intersphinx. The original mapping pointed at the
# long-dead http://sphinx.pocoo.org and matplotlib.sourceforge.net hosts;
# all entries now use the current https documentation locations.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('https://www.sphinx-doc.org/en/master', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'pandas': ('https://pandas.pydata.org/docs', None),
    'scipy': ('https://docs.scipy.org/doc/scipy', None),
}
| mit |
deepakantony/sms-tools | lectures/05-Sinusoidal-model/plots-code/sineModelAnal-bendir.py | 24 | 1245 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
# Sinusoidal-model analysis of bendir.wav, plotted over its spectrogram.
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
# STFT analysis parameters: 2001-point Hamming window, 2048-point FFT,
# 200-sample hop, -80 dB magnitude threshold for peak picking.
w = np.hamming(2001)
N = 2048
H = 200
t = -80
# Sinusoid-tracking parameters: minimum track duration (s), maximum number
# of simultaneous sinusoids, and allowed per-frame frequency deviation.
minSineDur = .02
maxnSines = 150
freqDevOffset = 10
freqDevSlope = 0.001
# Magnitude/phase spectrogram plus sinusoidal track frequencies/magnitudes/phases.
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
plt.figure(1, figsize=(9.5, 7))
# Display only the band up to 800 Hz.
maxplotfreq = 800.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# Overlay the tracks; zero/out-of-band samples become NaN so matplotlib
# leaves gaps instead of drawing lines down to zero.
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (bendir.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-bendir.png')
plt.show() | agpl-3.0 |
gfyoung/pandas | pandas/tests/series/methods/test_to_csv.py | 3 | 6229 | from datetime import datetime
from io import StringIO
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.io.common import get_handle
class TestSeriesToCSV:
    """Round-trip tests for Series.to_csv / pd.read_csv."""
    def read_csv(self, path, **kwargs):
        # Helper: read a one-column CSV back as a Series (squeeze=True),
        # mimicking the defaults Series.to_csv writes with.
        params = {"squeeze": True, "index_col": 0, "header": None, "parse_dates": True}
        params.update(**kwargs)
        header = params.get("header")
        out = pd.read_csv(path, **params)
        if header is None:
            # A headerless file carries no names; normalise for comparison.
            out.name = out.index.name = None
        return out
    def test_from_csv(self, datetime_series, string_series):
        # freq doesnt round-trip
        datetime_series.index = datetime_series.index._with_freq(None)
        with tm.ensure_clean() as path:
            datetime_series.to_csv(path, header=False)
            ts = self.read_csv(path)
            tm.assert_series_equal(datetime_series, ts, check_names=False)
            assert ts.name is None
            assert ts.index.name is None
            # see gh-10483
            datetime_series.to_csv(path, header=True)
            ts_h = self.read_csv(path, header=0)
            assert ts_h.name == "ts"
            string_series.to_csv(path, header=False)
            series = self.read_csv(path)
            tm.assert_series_equal(string_series, series, check_names=False)
            assert series.name is None
            assert series.index.name is None
            string_series.to_csv(path, header=True)
            series_h = self.read_csv(path, header=0)
            assert series_h.name == "series"
            # Custom separator, with and without date parsing.
            with open(path, "w") as outfile:
                outfile.write("1998-01-01|1.0\n1999-01-01|2.0")
            series = self.read_csv(path, sep="|")
            check_series = Series(
                {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0}
            )
            tm.assert_series_equal(check_series, series)
            series = self.read_csv(path, sep="|", parse_dates=False)
            check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0})
            tm.assert_series_equal(check_series, series)
    def test_to_csv(self, datetime_series):
        with tm.ensure_clean() as path:
            datetime_series.to_csv(path, header=False)
            with open(path, newline=None) as f:
                lines = f.readlines()
            assert lines[1] != "\n"
            datetime_series.to_csv(path, index=False, header=False)
            arr = np.loadtxt(path)
            tm.assert_almost_equal(arr, datetime_series.values)
    def test_to_csv_unicode_index(self):
        buf = StringIO()
        s = Series(["\u05d0", "d2"], index=["\u05d0", "\u05d1"])
        s.to_csv(buf, encoding="UTF-8", header=False)
        buf.seek(0)
        s2 = self.read_csv(buf, index_col=0, encoding="UTF-8")
        tm.assert_series_equal(s, s2)
    def test_to_csv_float_format(self):
        with tm.ensure_clean() as filename:
            ser = Series([0.123456, 0.234567, 0.567567])
            ser.to_csv(filename, float_format="%.2f", header=False)
            rs = self.read_csv(filename)
            xp = Series([0.12, 0.23, 0.57])
            tm.assert_series_equal(rs, xp)
    def test_to_csv_list_entries(self):
        # Smoke test: list-valued entries must serialise without error.
        s = Series(["jack and jill", "jesse and frank"])
        split = s.str.split(r"\s+and\s+")
        buf = StringIO()
        split.to_csv(buf, header=False)
    def test_to_csv_path_is_none(self):
        # GH 8215
        # Series.to_csv() was returning None, inconsistent with
        # DataFrame.to_csv() which returned string
        s = Series([1, 2, 3])
        csv_str = s.to_csv(path_or_buf=None, header=False)
        assert isinstance(csv_str, str)
    @pytest.mark.parametrize(
        "s,encoding",
        [
            (
                Series([0.123456, 0.234567, 0.567567], index=["A", "B", "C"], name="X"),
                None,
            ),
            # GH 21241, 21118
            (Series(["abc", "def", "ghi"], name="X"), "ascii"),
            (Series(["123", "你好", "世界"], name="中文"), "gb2312"),
            (Series(["123", "Γειά σου", "Κόσμε"], name="Ελληνικά"), "cp737"),
        ],
    )
    def test_to_csv_compression(self, s, encoding, compression):
        with tm.ensure_clean() as filename:
            s.to_csv(filename, compression=compression, encoding=encoding, header=True)
            # test the round trip - to_csv -> read_csv
            result = pd.read_csv(
                filename,
                compression=compression,
                encoding=encoding,
                index_col=0,
                squeeze=True,
            )
            tm.assert_series_equal(s, result)
            # test the round trip using file handle - to_csv -> read_csv
            with get_handle(
                filename, "w", compression=compression, encoding=encoding
            ) as handles:
                s.to_csv(handles.handle, encoding=encoding, header=True)
                result = pd.read_csv(
                    filename,
                    compression=compression,
                    encoding=encoding,
                    index_col=0,
                    squeeze=True,
                )
                tm.assert_series_equal(s, result)
            # explicitly ensure file was compressed
            with tm.decompress_file(filename, compression) as fh:
                text = fh.read().decode(encoding or "utf8")
                assert s.name in text
            with tm.decompress_file(filename, compression) as fh:
                tm.assert_series_equal(
                    s, pd.read_csv(fh, index_col=0, squeeze=True, encoding=encoding)
                )
    def test_to_csv_interval_index(self):
        # GH 28210
        s = Series(["foo", "bar", "baz"], index=pd.interval_range(0, 3))
        with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
            s.to_csv(path, header=False)
            result = self.read_csv(path, index_col=0, squeeze=True)
            # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
            expected = s.copy()
            expected.index = expected.index.astype(str)
            tm.assert_series_equal(result, expected)
| bsd-3-clause |
zanton123/HaSAPPy | program/DesignGeneInsertion.py | 1 | 9186 | # -*- coding: utf-8 -*-
"""
Created on Tue May 24 08:20:07 2016
@author: GDM
"""
#### Importing modules ####
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import HTSeq
import cPickle as pickle
import os
import re
mpl.interactive(False)
####
#### Class definition ####
class GeneII():
    """Per-gene insertion container.

    ``name`` records the experimental group the collection belongs to;
    sense insertions accumulate in ``FW`` and anti-sense ones in ``RV``.
    """

    def __init__(self, group_name):
        # Each instance starts with its own empty sense/anti-sense lists.
        self.name = group_name
        self.FW = []
        self.RV = []
####
#### Functions definition ####
def start(Info):
    """Plot per-gene insertion profiles comparing the reference group with
    each of the other experimental groups described by the pickled
    GroupAnalysis object; one SVG per gene/interval and comparison is
    written to the GroupAnalysis storage location.
    """
    ####
    def group_generation(Info,genes,GroupAnalysis,GroupAnalysis_group,rank):
        # Collect the insertions of every requested gene for one group
        # (rank == 'NaN') or for one ranked sub-group of `Others`.
        ####
        def upload_informations(Info,block):
            for exp in block['input']: #each replicate experiment
                with open (exp, 'rb') as handle: #openin Row Series
                    insertions = pickle.load(handle).row
                for gene in genes:#iteration for each gene of interest
                    if type(gene) == HTSeq._HTSeq.GenomicInterval:
                        iv = gene
                    else:
                        iv = genes_ref.loc[gene,'genomic_interval'].copy()#genomic interval of the gene of intersts
                    iv.strand = '.' #remove the strand parameter (for the moment)
                    selected_ins= [(i,insertions[i]) for i in insertions.index if iv.contains(i)] #list of insertions in the replicate experiment containined in gene
                    for i in selected_ins: #divide in sense and anti-sense insertions
                        if type(gene) == HTSeq._HTSeq.GenomicInterval:
                            if i[0].strand == '+':
                                block[gene].FW.append(i)
                            else:
                                block[gene].RV.append(i)
                        else:
                            if i[0].strand == genes[gene]['genomic_interval'].strand:
                                block[gene].FW.append(i)
                            else:
                                block[gene].RV.append(i)
            return block
        ####
        block = {}
        block['input'] = []
        if rank == 'NaN':
            block['name'] = GroupAnalysis_group.name
        else:
            block['name'] = GroupAnalysis_group.name[rank]
        for gene in genes:
            block[gene] = GeneII(block['name'])
        if rank == 'NaN':
            experiments = GroupAnalysis_group.experiments
        else:
            experiments = GroupAnalysis_group.experiments[rank]
        for exp in experiments:
            pos = GroupAnalysis.lib_names.index(exp)
            address = GroupAnalysis.input_files[pos]
            with open (address,'rb') as handle:
                block['input'].append(pickle.load(handle).input)
        block = upload_informations(Info,block)
        return block
    ####
    def draw_gene(Info,name,gene,group_reference,group_other,storage_loc):
        # Three stacked panels: top = other group, middle = gene model,
        # bottom = reference group with an inverted y axis.
        if type(name) == HTSeq._HTSeq.GenomicInterval:
            genomic_interval = True
        else:
            genomic_interval = False
        fig1 = plt.figure()
        if genomic_interval:
            fig1.suptitle('%s'%name)
        else:
            fig1.suptitle('%s (%s)'%(name,gene['genomic_interval']))
        ax1 = fig1.add_subplot(311)
        ax2 = fig1.add_subplot(312)
        ax3 = fig1.add_subplot(313)
        if genomic_interval:
            start_pos = name.start_d
            end_pos = name.end_d
        else:
            start_pos = gene['genomic_interval'].start_d
            end_pos = gene['genomic_interval'].end_d
        #To define max read value in the two libraries for get ymax position
        x_all,y_all = zip(*(group_reference.FW + group_reference.RV + group_other.FW +group_other.RV))
        ymax = max(y_all)
        #Eventually add here log schale if ymax > ...
        #Design Scatter for 1st experiment
        ax1.axis(xmin =start_pos, xmax = end_pos, ymin = 0, ymax = ymax) #First scatter plot
        ax1.set_ylabel(group_other.name) #Name of the experiment to show in ylabel
        ax1.tick_params(
            axis='x', # changes apply to the x-axis
            which='both', # both major and minor ticks are affected
            bottom='off', # ticks along the bottom edge are off
            top='off', # ticks along the top edge are off
            labelbottom='off') # labels along the bottom edge are off
        if ymax > 200:
            ax1.set_yscale('log')
            ax1.axis(xmin =start_pos, xmax = end_pos, ymin = 1, ymax = ymax)
        for i in group_other.FW: # Plotting points according thier value. Devided in two colors: red if sense, green if anti-sense
            ax1.scatter(i[0].pos,i[1],s = 1, color = 'r')
        for i in group_other.RV:
            ax1.scatter(i[0].pos,i[1],s = 1, color = 'g')
        #Design Scatter for 2nd experiment
        ax3.axis(xmin =start_pos, xmax = end_pos, ymin = 0, ymax =ymax)
        ax3.invert_yaxis()
        ax3.set_ylabel(group_reference.name) #Name of the experiment to show in ylabel
        ax3.tick_params(
            axis='x', # changes apply to the x-axis
            which='both', # both major and minor ticks are affected
            bottom='off', # ticks along the bottom edge are off
            top='off', # ticks along the top edge are off
            labelbottom='off') # labels along the bottom edge are off
        if ymax > 200:
            ax3.set_yscale('log')
            ax3.axis(xmin =start_pos, xmax = end_pos, ymin = 1, ymax = ymax)
            ax3.invert_yaxis()
        for i in group_reference.FW: # Plotting points according thier value. Devided in two colors: red if sense, green if anti-sense
            ax3.scatter(i[0].pos,i[1],s = 1, color = 'r')
        for i in group_reference.RV:
            ax3.scatter(i[0].pos,i[1],s = 1, color = 'g')
        #Design gene models
        if genomic_interval:
            transcripts = gene
        else:
            transcripts = gene['variants']
        ax2.axis([start_pos,end_pos,0,len(transcripts)+1])
        ax2.axis('off')
        y_value = 0 #location of gene in y axes, acccording to transcripts number
        for transcript in transcripts:
            y_value +=1 #move 1 up
            ax2.text((end_pos), # Transcript name starting position x-axis (end of gene)
                     (y_value-0.2), # Transcript name starting position y-axis
                     (' ' + transcript),fontsize = 4)
            #line rapresenting all the transcript length
            ax2.plot([min([exon.start_d for exon in transcripts[transcript]]), max([exon.end_d for exon in transcripts[transcript]])],[y_value,y_value],'b',linewidth = 2./len(transcripts))
            for exon in transcripts[transcript]:
                ax2.add_patch(patches.Rectangle(
                    (exon.start,(y_value-0.2)), exon.length,0.4,linewidth = 0.1))
        fig1.show()
        if genomic_interval:
            fig1.savefig(os.path.join(storage_loc,'%s_%svs%s.svg'%('interval',group_other.name,group_reference.name)))
        else:
            fig1.savefig(os.path.join(storage_loc,'%s_%svs%s.svg'%(name,group_other.name,group_reference.name)))
    ####
    # Build a genomic array of all reference genes so that interval queries
    # like "(chr_start_end)" can be resolved to the genes they overlap.
    genome = HTSeq.GenomicArrayOfSets("auto", stranded = False)
    with open(Info.Design.reference, 'rb') as handle:
        genes_ref = pickle.load(handle)
    for gene in genes_ref.index:
        genome[genes_ref.ix[gene,'genomic_interval']] += gene
    genes ={}
    for gene in Info.Design.genes:
        if re.search ('\((.+)_(.+)_(.+)\)',gene):
            value = re.findall ('\((.+)_(.+)_(.+)\)',gene)[0]
            iv = HTSeq.GenomicInterval(value[0],int(value[1]),int(value[2]),'.')
            genes_in_iv = [x[1] for x in genome[iv].steps()]
            genes_selected = set()
            for group in genes_in_iv:
                for gene in group:
                    genes_selected.add(gene)
            genes[iv] = {}
            for gene in genes_selected:
                genes[iv][gene] = genes_ref.loc[gene,'variants'].values()[0]
        else:
            genes[gene] = genes_ref.loc[gene]
    with open (Info.Design.input_files,'rb') as handle: #loading GroupAnlaysis class exential to get all information on the samples
        GroupAnalysis = pickle.load(handle)
    reference = group_generation(Info,genes,GroupAnalysis,GroupAnalysis.Reference,'NaN')
    others = []
    for group in GroupAnalysis.Others.name:
        others.append(group_generation(Info,genes,GroupAnalysis,GroupAnalysis.Others,GroupAnalysis.Others.name.index(group)))
    for name in genes:
        for group in others:
            draw_gene(Info,name,genes[name],reference[name],group[name],GroupAnalysis.storage_loc)
| mit |
sunshineDrizzle/FreeROI | froi/algorithm/unused/spectralmapper.py | 6 | 3516 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper for spectral clustering.
Date: 2012.05.29
"""
__docformat__ = 'restructuredtext'
import numpy as np
import scipy.sparse as sp
from mvpa2.base import warning
from mvpa2.base.dochelpers import _str, borrowkwargs, _repr_attrs
from mvpa2.mappers.base import accepts_dataset_as_samples, Mapper
from mvpa2.datasets.base import Dataset
from mvpa2.datasets.miscfx import get_nsamples_per_attr, get_samples_by_attr
from mvpa2.support import copy
from sklearn.cluster import SpectralClustering
class SpectralMapper(Mapper):
    """Mapper that replaces dataset samples with spectral-clustering labels."""

    def __init__(self, chunks_attr=None, k=8, mode='arpack', random_state=None, n_init=10, **kwargs):
        """
        Parameters
        ----------
        chunks_attr : str or None
          If provided, it specifies the name of a samples attribute in the
          training data, unique values of which will be used to identify chunks
          of samples, and to perform individual clustering within them.
        k : int or ndarray
          The number of clusters to form as well as the number of centroids to
          generate. If init initialization string is matrix, or if a ndarray
          is given instead, it is interpreted as initial cluster to use instead.
        mode : {None, 'arpack' or 'amg'}
          The eigenvalue decomposition strategy to use. AMG requires pyamg
          to be installed. It can be faster on very large, sparse problems,
          but may also lead to instabilities.
        random_state : int seed, RandomState instance, or None (default)
          A pseudo random number generator used for the initialization
          of the lobpcg eigen vectors decomposition when mode == 'amg'
          and by the K-Means initialization.
        n_init : int
          Number of iterations of the k-means algorithm to run. Note that this
          differs in meaning from the iters parameter to the kmeans function.
        """
        Mapper.__init__(self, **kwargs)

        self.__chunks_attr = chunks_attr
        self.__k = k
        self.__mode = mode
        self.__random_state = random_state
        self.__n_init = n_init

    def __repr__(self, prefixes=None):
        # BUG FIX: this method previously called super(KMeanMapper, self),
        # but no KMeanMapper exists in this module, so every repr() raised
        # NameError. Also avoid the mutable default argument.
        if prefixes is None:
            prefixes = []
        return super(SpectralMapper, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['chunks_attr', 'k', 'mode', 'random_state', 'n_init']))

    def __str__(self):
        return _str(self)

    def _forward_dataset(self, ds):
        """Return a dataset whose samples are the cluster labels of ds."""
        chunks_attr = self.__chunks_attr
        mds = Dataset([])
        mds.a = ds.a
        # mds.sa =ds.sa
        # mds.fa =ds.fa
        if chunks_attr is None:
            # Global clustering over the whole dataset.
            mds.samples = self._spectralcluster(ds.samples).labels_
            print(max(mds.samples))
        else:
            # Per-chunk clustering.
            # NOTE(review): `mds.samples = ds.samples[0,:]` is re-assigned on
            # every chunk iteration and only keeps the first sample row --
            # looks suspicious; confirm against a KMeansMapper counterpart.
            for c in ds.sa[chunks_attr].unique:
                slicer = np.where(ds.sa[chunks_attr].value == c)[0]
                mds.samples = ds.samples[0,:]
                mds.samples[slicer] = self._spectralcluster(ds.samples[slicer]).labels_
        return mds

    def _spectralcluster(self, samples):
        """Run sklearn SpectralClustering on a (densified) sample matrix."""
        if sp.issparse(samples):
            samples = samples.todense()
        print(np.shape(samples))
        # Turn the (distance-like) samples into an affinity matrix.
        samples = np.exp(-samples/samples.std())
        return SpectralClustering(k=self.__k, n_init=self.__n_init, mode=self.__mode).fit(samples)
| bsd-3-clause |
alvations/oque | que.py | 1 | 8287 |
import io, sys
import numpy as np
from scipy.stats import uniform as sp_rand
from itertools import combinations
from sklearn.linear_model import BayesianRidge
from sklearn.grid_search import RandomizedSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from o import cosine_feature, complexity_feature
# Dataset split labels used to compose QuEst file names
# (e.g. ``quest/en-de.training.meteor``).
train, test = 'training', 'test'
def load_quest(direction, dataset, which_data, quest_data_path='quest/',
               to_normalize=True):
    """Read a whitespace-delimited QuEst feature/score file into an array.

    The file is looked up at
    ``<quest_data_path><direction>.<dataset>.<which_data>``, e.g.::

        baseline_train = load_quest('en-de', 'training', 'baseline17')
        meteor_test = load_quest('en-de', 'test', 'meteor')

    When *to_normalize* is true the whole array is divided by its
    Frobenius norm before being returned.
    """
    filename = '{0}{1}.{2}.{3}'.format(quest_data_path, direction,
                                       dataset, which_data)
    data = np.loadtxt(filename)
    return data / np.linalg.norm(data) if to_normalize else data
def load_wmt15_data(direction):
    """Load the raw (un-normalized) baseline-feature and METEOR-score
    arrays for both the training and the test split of *direction*.

    Returns the 4-tuple
    (baseline_train, meteor_train, baseline_test, meteor_test).
    """
    def _raw(split, kind):
        # The WMT15 setup always works on the un-normalized values.
        return load_quest(direction, split, kind, to_normalize=False)

    return (_raw(train, 'baseline17'), _raw(train, 'meteor'),
            _raw(test, 'baseline17'), _raw(test, 'meteor'))
def load_cosine_features(direction):
    """Build the cosine-similarity feature columns for the training and
    test splits of *direction*, returned as (cos_train, cos_test).

    (The original kept a commented-out alternative that used
    ``complexity_feature`` instead of ``cosine_feature``.)
    """
    return cosine_feature(direction, train), cosine_feature(direction, test)
def train_classiifer(X_train, y_train, to_tune, classifier):
    """Fit a regression model on the given training data.

    Parameters
    ----------
    X_train, y_train : array-like
        Training feature matrix and target scores.
    to_tune : bool
        When true (and no *classifier* is supplied), run a randomized
        search over the SVR ``C``/``gamma`` parameters before fitting.
    classifier : estimator or None
        A pre-configured estimator to use as-is; supplying one disables
        tuning.

    Returns
    -------
    The fitted estimator.
    """
    # NOTE: the original also created (and immediately discarded) a
    # BayesianRidge model and an alpha_1/alpha_2 parameter grid; those
    # dead stores are removed here without changing behaviour.
    if classifier:
        # A caller-supplied model is used unchanged: no tuning.
        clf = classifier
        to_tune = False
    else:
        clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    if to_tune:
        # Randomized search: find good classifier parameters.
        param_grid = {'C': sp_rand(), 'gamma': sp_rand()}
        rsearch = RandomizedSearchCV(estimator=clf,
                                     param_distributions=param_grid, n_iter=5000)
        rsearch.fit(X_train, y_train)
        # Use the tuned classifier.
        clf = rsearch.best_estimator_
    # Fit on the full training data.
    clf.fit(X_train, y_train)
    return clf
def brute_force_feature_selection():
    """Yield every non-empty subset of the 17 baseline feature indices.

    Subsets are produced in order of increasing size, each as a tuple of
    indices drawn from ``range(17)``; 2**17 - 1 tuples in total.
    """
    n_features = 17
    for size in range(1, n_features + 1):
        for subset in combinations(range(n_features), size):
            yield subset
def evaluate_classifier(clf, X_test, direction, with_cosine,
                        to_tune, to_output=True, to_hack=False):
    """Predict quality scores for X_test and optionally write them to disk.
    When *to_hack* is true, a set of hand-tuned heuristic corrections
    (thresholds on features 0, 1 and 9) is subtracted from the raw
    predictions and the result is clamped at zero.
    Returns ``(answers, outfile_name)``; *outfile_name* is '' when no file
    is written. (Python 2 code: uses ``unicode`` and ``io.open``.)
    """
    answers = list(clf.predict(X_test))
    if to_hack:
        hacked_answers = []
        for i,j in zip(answers, X_test):
            if j[9] > 0.7 and j[0] < 12: i = i - 0.2;
            if j[0] ==1 or j[1]== 1: i = i - 0.15;
            if j[0] > 200: i = i - 0.1;
            if i < 0: i = 0.0;
            hacked_answers.append(i)
        answers = hacked_answers
    outfile_name = ''
    if to_output: # Outputs to file.
        to_tune_str = 'tuned' if to_tune else 'notune'
        model_name = 'withcosine' if with_cosine else 'baseline'
        outfile_name = ".".join(['oque',model_name,
                                 to_tune_str,direction,'output'])
        with io.open(outfile_name, 'w') as fout:
            for i in answers:
                fout.write(unicode(i)+'\n')
    return answers, outfile_name
def brute_force_classification(X_train, y_train, X_test, y_test,
                               direction, with_cosine,
                               to_tune, to_output=False, to_hack=False):
    """Train and score a model on every non-empty baseline-feature subset.
    For each subset from brute_force_feature_selection() an SVR model is
    trained, its MAE/MSE against y_test computed, and the per-sentence
    predictions written to ``results/oque.baseline.<direction>.<mae>.<subset>.output``.
    (Python 2 code: uses the print statement and ``unicode``.)
    """
    #score_fout = io.open('que.'+direction+'.scores', 'w')
    for f in brute_force_feature_selection():
        _X_train = X_train[:, f]
        _X_test = X_test[:, f]
        # Train classifier
        clf = train_classiifer(_X_train, y_train, to_tune, classifier=None)
        answers, outfile_name = evaluate_classifier(clf, _X_test, direction,
                                                    with_cosine, to_tune,
                                                    to_output=False, to_hack=False)
        mse = mean_squared_error(y_test, np.array(answers))
        mae = mean_absolute_error(y_test, np.array(answers))
        outfile_name = "results/oque.baseline." + direction +'.'+str(mae) + '.'
        outfile_name+= "-".join(map(str, f))+'.output'
        with io.open(outfile_name, 'w') as fout:
            for i in answers:
                fout.write(unicode(i)+'\n')
        print mae, f
        sys.stdout.flush()
def experiments(direction, with_cosine, to_tune, to_output=True, to_hack=False,
                to_debug=False, classifier=None):
    '''
    # USAGE:
    direction = 'en-de'
    to_tune = False
    with_cosine = False
    outfilename, mae, mse = experiments(direction, to_tune, with_cosine)
    print outfilename, mae, mse
    '''
    # Create training and testing array and outputs
    X_train, y_train, X_test, y_test = load_wmt15_data(direction)
    if with_cosine:
        # Create cosine features for training
        cos_train, cos_test = load_cosine_features(direction)
        X_train = np.concatenate((X_train, cos_train), axis=1)
        X_test = np.concatenate((X_test, cos_test), axis=1)
    brute_force_classification(X_train, y_train, X_test, y_test, direction,
                               with_cosine, to_tune, to_output=False,
                               to_hack=False)
    '''
    # Best setup for EN-DE up till now.
    f = (2, 9, 13)
    _X_train = X_train[:, f]
    _X_test = X_test[:, f]
    clf = train_classiifer(_X_train, y_train, to_tune, classifier=None)
    answers, outfile_name = evaluate_classifier(clf, _X_test, direction,
                                                with_cosine, to_tune,
                                                to_output=True, to_hack=False)
    '''
    # NOTE(review): `answers` and `outfile_name` are only defined by the
    # "Best setup" code above, which is commented out -- as written, the
    # two lines below raise NameError at runtime.
    mse = mean_squared_error(y_test, np.array(answers))
    mae = mean_absolute_error(y_test, np.array(answers))
    if to_debug:
        # Dump the source/target sentences whose predictions deviate most.
        srcfile = io.open('quest/en-de_source.test', 'r')
        trgfile = io.open('quest/en-de_target.test', 'r')
        cos_train, cos_test = load_cosine_features(direction)
        for i,j,k,s,t, c in zip(answers, y_test, X_test,
                                srcfile, trgfile, cos_test):
            if i - j > 0.095 or j -1 > 0.095 or c == 9.99990000e-11:
                print i, j, k[0], k[9], k, c
                print s, t
    return outfile_name, mae, mse
# Script entry point: run the brute-force feature-selection experiment for
# the English->German direction, without cosine features or tuning.
direction = 'en-de'
with_cosine = False
to_tune = False
to_output = False
outfilename, mae, mse = experiments(direction, with_cosine,to_tune, to_output, to_debug=False)
print outfilename, mae, mse
# DE-EN
# no-hack at all
# oque.baseline.notune.de-en.output 0.0692666454858 0.011038250617
# no-hack, with cosine
# oque.withcosine.notune.de-en.output 0.0692590476386 0.0110349222335
# Super default + hack
# oque.baseline.notune.de-en.output 0.0685437539196 0.0106677292505
# hacked
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# oque.withcosine.notune.de-en.output 0.0685361560723 0.0106643693054
# EN-DE
# oque.baseline.notune.en-de.output 0.0980804849285 0.0184924281565
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# oque.baseline.notune.en-de.output 0.097544087243 0.0208756823852
# oque.withcosine.notune.en-de.output 0.0975427119756 0.0208755274686
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.1
# oque.withcosine.notune.en-de.output 0.0973017481202 0.0207602928984
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# oque.withcosine.notune.en-de.output 0.0972310140807 0.0207568924808
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# if j[0] > 200: i = i - 0.1
# oque.withcosine.notune.en-de.output 0.0968903228194 0.0206775825255
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# if j[0] > 200: i = i - 0.1
# if i < 0: i = 0.0
# oque.withcosine.notune.en-de.output 0.0968359771138 0.0206633629455 | mit |
glennq/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)

import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt

###############################################################################
# Generate sample data: a noisy sine over [0, 5)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to every fifth target
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression models with three different kernels
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# Look at the results.
# NOTE: the original called plt.hold('on') here; Axes.hold was deprecated
# in matplotlib 2.0 and removed in 3.0 ("hold" behaviour is always on now),
# so the call has been dropped.
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
abhisg/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    """Description of one delegating meta-estimator under test: its display
    name, a callable wrapping a sub-estimator, delegated methods to skip,
    and the (X, y) data to fit on.
    NOTE(review): ``fit_args=make_classification()`` is evaluated once at
    import time, so every instance relying on the default shares the same
    dataset -- apparently intentional for these tests, but worth confirming.
    """
    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        self.name = name
        self.construct = construct
        self.fit_args = fit_args
        self.skip_methods = skip_methods
# The meta-estimators whose method delegation is exercised below; each entry
# skips the delegated methods that particular wrapper does not expose.
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Decorator: expose *method* through a property that raises
        # AttributeError when the instance was constructed to hide it,
        # simulating a sub-estimator lacking that method.
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper
    class SubEstimator(BaseEstimator):
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method
        def fit(self, X, y=None, *args, **kwargs):
            self.coef_ = np.arange(X.shape[1])
            return True
        def _check_fit(self):
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')
        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0
    # All public delegatable methods (everything except fit and helpers).
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()
    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])
        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # A hidden method on the delegate must not appear on the
            # delegator either.
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
inkenbrandt/WellApplication | wellapplication/hydropy.py | 1 | 5866 | """
Hydropy package
@author: Stijn Van Hoey
from: https://github.com/stijnvanhoey/hydropy/tree/master/hydropy
for a better and more up to date copy of this script go to the original repo.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
def get_baseflow_chapman(flowserie, recession_time):
    r"""Chapman one-parameter recursive baseflow filter.

    .. math:: Q_b(i) = \frac{k}{2-k}Q_b(i-1) + \frac{1-k}{2-k}Q(i)

    Parameters
    ----------
    flowserie : pd.Series
        River discharge series.
    recession_time : float [0-1]
        Recession constant :math:`k`.

    Returns
    -------
    pd.DataFrame
        Baseflow sharing ``flowserie``'s index; the first value is 0.
    """
    k = recession_time
    # Direct contribution of each discharge value, precomputed vectorized.
    direct = ((1. - k) * flowserie / (2. - k)).values
    filtered = np.zeros(flowserie.shape[0])
    # Recursive part: each estimate depends on the previous one.
    for idx in range(1, filtered.shape[0]):
        filtered[idx] = k * filtered[idx - 1] / (2. - k) + direct[idx]
    return pd.DataFrame(filtered, index=flowserie.index)
def get_baseflow_boughton(flowserie, recession_time, baseflow_index):
    r"""Boughton two-parameter recursive baseflow filter.

    .. math:: Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}Q(i)

    Parameters
    ----------
    flowserie : pd.Series
        River discharge series.
    recession_time : float [0-1]
        Recession constant :math:`k`.
    baseflow_index : float
        Filter parameter :math:`C`.

    Returns
    -------
    pd.DataFrame
        Baseflow sharing ``flowserie``'s index; the first value is 0.
    """
    k = recession_time
    C = baseflow_index
    # Direct contribution of each discharge value, precomputed vectorized.
    direct = (C * flowserie / (1 + C)).values
    filtered = np.zeros(flowserie.shape[0])
    for idx in range(1, filtered.shape[0]):
        filtered[idx] = k * filtered[idx - 1] / (1 + C) + direct[idx]
    return pd.DataFrame(filtered, index=flowserie.index)
def get_baseflow_ihacres(flowserie, recession_time, baseflow_index, alfa):
    r"""IHACRES three-parameter recursive baseflow filter.

    .. math:: Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}[Q(i)+\alpha Q(i-1)]

    Parameters
    ----------
    flowserie : pd.Series
        River discharge series.
    recession_time : float [0-1]
        Recession constant :math:`k`.
    baseflow_index : float
        Filter parameter :math:`C`.
    alfa : float
        Weight for the previous discharge; :math:`\alpha < 0`.

    Returns
    -------
    pd.DataFrame
        Baseflow sharing ``flowserie``'s index; the first value is 0.
    """
    k = recession_time
    C = baseflow_index
    frac = C / (1 + C)
    q = flowserie.values
    filtered = np.zeros(flowserie.shape[0])
    for idx in range(1, filtered.shape[0]):
        filtered[idx] = (k * filtered[idx - 1] / (1 + C)
                         + frac * (q[idx] + alfa * q[idx - 1]))
    return pd.DataFrame(filtered, index=flowserie.index)
def exp_curve(x, a, b):
    """Power-law curve ``a * x**b`` used when fitting rating curves."""
    return a * (x ** b)
def ratingCurve(discharge, stage):
    """Fit a power-law rating curve to paired discharge/stage measurements.

    Parameters
    ----------
    discharge : array-like
        Measured discharges.
    stage : array-like
        Corresponding stage readings.

    Returns
    -------
    popt : ndarray
        Fitted coefficients ``(a, b)`` for the curve ``y = a * x**b``.
    r_squared : float
        Coefficient of determination of the fit.
    """
    discharge = np.asarray(discharge, dtype=float)
    stage = np.asarray(stage, dtype=float)
    popt, pcov = curve_fit(exp_curve, stage, discharge)

    # Vectorized R^2 (replaces the original per-point Python loop).
    residuals = discharge - exp_curve(stage, *popt)
    ss_res = np.sum(residuals ** 2)
    ss_tot = np.sum((discharge - discharge.mean()) ** 2)
    return popt, 1.0 - ss_res / ss_tot
def RB_Flashiness(series):
    """Richards-Baker Flashiness Index for a series of daily mean discharges.

    Ratio of the discharge "path length" to the total discharge; vectorized
    rewrite of the original element-by-element loop.
    https://github.com/hydrogeog/hydro

    Parameters
    ----------
    series : array-like
        Daily mean discharges.

    Returns
    -------
    float
        Flashiness index (``nan`` for empty input).
    """
    q = np.asarray(series, dtype=float)
    if q.size == 0:
        return np.nan  # original yielded 0/0 here as well
    # NOTE(review): as in the original, the first discharge value itself is
    # counted in the path length before the day-to-day absolute differences.
    path = q[0] + np.abs(np.diff(q)).sum()
    return path / q.sum()
def flow_duration(series):
    """Build the flow duration curve for a discharge dataset.

    Returns a pandas Series indexed by the unique discharge values whose
    values are the exceedance probability in percent.
    https://github.com/hydrogeog/hydro
    """
    counts = pd.Series(series).value_counts().sort_index()
    cumulative = counts.cumsum()
    total = cumulative.max()
    # Exceedance probability: share of observations above each discharge.
    return 100 - cumulative / total * 100
def Lyne_Hollick(series, alpha=.925, direction='f'):
    """Recursive digital filter for baseflow separation (Lyne and Hollick, 1979).

    series = array of discharge measurements
    alpha = filter parameter
    direction = (f)orward or (r)everse calculation
    https://github.com/hydrogeog/hydro
    """
    q = np.array(series)
    quick = np.zeros(len(q))
    gain = (1 + alpha) / 2
    if direction == 'f':
        for t in range(1, len(q)):
            quick[t] = alpha * quick[t - 1] + gain * (q[t] - q[t - 1])
            # Negative quickflow would push baseflow above streamflow: clamp.
            if q[t] - quick[t] > q[t]:
                quick[t] = 0
    elif direction == 'r':
        # Note: iterates down to index 2 only, mirroring the upstream
        # implementation (indices 0 and 1 are left unfiltered).
        for t in range(len(q) - 2, 1, -1):
            quick[t] = alpha * quick[t + 1] + gain * (q[t] - q[t + 1])
            if q[t] - quick[t] > q[t]:
                quick[t] = 0
    # Baseflow is what remains after removing the quickflow component.
    return np.array(q - quick)
def Eckhardt(series, alpha=.98, BFI=.80):
    """Recursive digital filter for baseflow separation (Eckhardt, 2004).

    series = array of discharge measurements
    alpha = filter parameter
    BFI = BFI_max (maximum baseflow index)
    https://github.com/hydrogeog/hydro
    """
    q = np.array(series)
    base = np.zeros(len(q))
    base[0] = q[0]
    denom = 1 - alpha * BFI
    for t in range(1, len(q)):
        candidate = ((1 - BFI) * alpha * base[t - 1]
                     + (1 - alpha) * BFI * q[t]) / denom
        # Baseflow can never exceed total streamflow.
        base[t] = min(candidate, q[t])
    return base
| mit |
Vimos/scikit-learn | examples/cluster/plot_kmeans_digits.py | 42 | 4491 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)  # make k-means initialization reproducible across runs

digits = load_digits()
data = scale(digits.data)  # standardize features before clustering

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))  # number of distinct target labels
labels = digits.target

# Number of samples used when estimating the silhouette coefficient below.
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))


print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data* and print timing, inertia and cluster metrics."""
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    scores = (
        metrics.homogeneity_score(labels, estimator.labels_),
        metrics.completeness_score(labels, estimator.labels_),
        metrics.v_measure_score(labels, estimator.labels_),
        metrics.adjusted_rand_score(labels, estimator.labels_),
        metrics.adjusted_mutual_info_score(labels, estimator.labels_),
        metrics.silhouette_score(data, estimator.labels_,
                                 metric='euclidean',
                                 sample_size=sample_size),
    )
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % ((name, elapsed, estimator.inertia_) + scores))
# Benchmark the three initialization strategies on the same data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(82 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

# Overlay the (PCA-reduced) data points on the cluster regions.
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """numpy.distutils configuration for the ``metrics`` subpackage.

    Builds the ``pairwise_fast`` C extension against the CBLAS headers in
    ``../src/cblas`` plus whatever BLAS ``get_blas_info()`` detects.
    """
    config = Configuration("metrics", parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    # Link libm explicitly for the math symbols on POSIX systems.
    if os.name == 'posix':
        cblas_libs.append('m')

    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)
    return config
if __name__ == "__main__":
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
krez13/scikit-learn | sklearn/metrics/classification.py | 8 | 68395 | """Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Validate that ``y_true`` and ``y_pred`` describe the same kind of
    classification task and bring them to a common representation.

    Column vectors are flattened to 1d, while multilabel inputs are
    converted to CSR sparse label-indicator matrices.

    Parameters
    ----------
    y_true : array-like
    y_pred : array-like

    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``.
    y_true : array or indicator matrix
    y_pred : array or indicator matrix

    Raises
    ------
    ValueError
        For a mix of multilabel and multiclass targets, a mix of multilabel
        formats, continuous-valued or multioutput targets, or targets of
        different lengths.
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)

    kinds = {type_true, type_pred}
    # A binary target compared with a multiclass one is treated as
    # multiclass overall.
    if kinds == {"binary", "multiclass"}:
        kinds = {"multiclass"}

    if len(kinds) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))

    # Exactly one value remains.
    y_type = kinds.pop()

    # No metrics support the "multiclass-multioutput" format.
    if y_type not in ("binary", "multiclass", "multilabel-indicator"):
        raise ValueError("{0} is not supported".format(y_type))

    if y_type in ("binary", "multiclass"):
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)
    elif y_type.startswith('multilabel'):
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'

    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    In multilabel classification this computes subset accuracy: a sample
    counts as correct only when its predicted label set matches the
    corresponding set in ``y_true`` exactly.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples
        instead of the fraction.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        Fraction (``normalize=True``) or count (``normalize=False``) of
        correctly classified samples; the best value is 1 (respectively
        the number of samples).

    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification this function is equal to the
    ``jaccard_similarity_score`` function.
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type.startswith('multilabel'):
        # A sample is correct only when no label differs.
        n_differing = count_nonzero(y_true - y_pred, axis=1)
        per_sample = n_differing == 0
    else:
        per_sample = y_true == y_pred

    return _weighted_sum(per_sample, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
    """Compute a confusion matrix to evaluate classification accuracy.

    By definition, ``C[i, j]`` is the number of observations known to be in
    group ``i`` but predicted to be in group ``j``.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If none is given, those that appear at
        least once in ``y_true`` or ``y_pred`` are used in sorted order.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix.

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <http://en.wikipedia.org/wiki/Confusion_matrix>`_
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # Convert targets to indices; labels outside `labels` map to an
    # out-of-range index so they can be filtered out below.
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # Intersect y_pred, y_true with labels, eliminating items not in labels.
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    # BUG FIX: `np.int` (an alias of the builtin int) was deprecated in
    # NumPy 1.20 and removed in 1.24; use an explicit fixed-width dtype.
    CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int64),
                     (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()

    return CM
def cohen_kappa_score(y1, y2, labels=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    Computes Cohen's kappa [1]_, defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the observed agreement ratio and :math:`p_e` the
    agreement expected by chance, estimated using a per-annotator empirical
    prior over the class labels [2]_.

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.

    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The statistic is symmetric,
        so swapping ``y1`` and ``y2`` doesn't change the value.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix; may select a subset of labels.
        If None, all labels that appear at least once in ``y1`` or ``y2``
        are used.

    Returns
    -------
    kappa : float
        Between -1 and 1. The maximum value means complete agreement; zero
        or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistic 34(4):555-596.
    """
    observed = confusion_matrix(y1, y2, labels=labels)
    # Joint probability table of the two annotations.
    P = observed / float(observed.sum())
    p_observed = np.trace(P)
    p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
    return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score.

    The Jaccard index [1]_ — intersection size divided by union size of two
    label sets — compares the predicted label set of each sample to the
    corresponding set in ``y_true``.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the sum of the per-sample coefficients instead
        of their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        Average (``normalize=True``) or sum (``normalize=False``) of the
        Jaccard similarity coefficients; the best value is 1 (respectively
        the number of samples).

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equivalent to
    the ``accuracy_score``. It differs for multilabel problems.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <http://en.wikipedia.org/wiki/Jaccard_index>`_
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type.startswith('multilabel'):
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error
            union = count_nonzero(y_true + y_pred, axis=1)
            intersection = count_nonzero(y_true.multiply(y_pred), axis=1)
            score = intersection / union

            # Empty label sets give 0/0 = nan; define the score as 1 there
            # (lim_{x->0} x/x = 1).
            score[union == 0.0] = 1.0
    else:
        score = y_true == y_pred

    return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
    """Compute the Matthews correlation coefficient (MCC) for binary classes.

    The MCC is a balanced quality measure for binary classifications that
    takes true/false positives and negatives into account and is usable
    even with very different class sizes. It ranges from -1 (inverse
    prediction) through 0 (average random prediction) to +1 (perfect
    prediction); it is also known as the phi coefficient.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient.

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing
           the accuracy of prediction algorithms for classification: an
           overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
           <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)

    # Encode both vectors over the union of their labels.
    encoder = LabelEncoder()
    encoder.fit(np.hstack([y_true, y_pred]))
    y_true = encoder.transform(y_true)
    y_pred = encoder.transform(y_pred)

    with np.errstate(invalid='ignore'):
        mcc = np.corrcoef(y_true, y_pred)[0, 1]

    # A constant vector yields nan correlation; report 0 (chance level).
    return 0. if np.isnan(mcc) else mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    Returns the fraction of misclassifications (``normalize=True``) or
    their count (``normalize=False``); the best performance is 0.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications instead of
        the fraction.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        Fraction (float) or number (int) of misclassifications.

    Notes
    -----
    In multilabel classification, this corresponds to the subset zero-one
    loss: the entire label set of a sample must be predicted correctly,
    otherwise the loss for that sample is one.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score
    """
    score = accuracy_score(y_true, y_pred,
                           normalize=normalize,
                           sample_weight=sample_weight)

    if normalize:
        return 1 - score

    # Un-normalized: number of (possibly weighted) misclassified samples.
    n_samples = (np.sum(sample_weight) if sample_weight is not None
                 else _num_samples(y_true))
    return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure.

    The F1 score is the harmonic mean of precision and recall::

        F1 = 2 * (precision * recall) / (precision + recall)

    Its best value is 1 and its worst value is 0. In the multi-class and
    multi-label case, this is the weighted average of the F1 score of each
    class.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. For multilabel targets, labels
        are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it
        is necessary to set ``pos_label=None`` if seeking to use another
        averaging method over binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets; if ``None``, per-class
        scores are returned. See ``precision_recall_fscore_support`` for
        the meaning of each averaging mode.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    """
    # F1 is the special case beta=1 of the F-beta score.
    return fbeta_score(y_true, y_pred, beta=1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score.

    The F-beta score is the weighted harmonic mean of precision and recall,
    reaching its optimal value at 1 and its worst value at 0. The ``beta``
    parameter weights precision in the combined score: ``beta < 1`` favors
    precision, ``beta > 1`` favors recall (``beta -> 0`` considers only
    precision, ``beta -> inf`` only recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. For multilabel targets, labels
        are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it
        is necessary to set ``pos_label=None`` if seeking to use another
        averaging method over binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets; if ``None``, per-class
        scores are returned. See ``precision_recall_fscore_support`` for
        the meaning of each averaging mode.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or
        weighted average of the F-beta score of each class for the
        multiclass task.

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.
    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    """
    # Delegate to the shared implementation, keeping only the f-score part
    # (and restricting ill-defined warnings to the f-score).
    _, _, fscore, _ = precision_recall_fscore_support(
        y_true, y_pred,
        beta=beta,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=('f-score',),
        sample_weight=sample_weight)
    return fscore
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.

    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.

    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.

    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision: float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    recall: float (if average is not None) or array of float, , shape =\
        [n_unique_labels]

    fbeta_score: float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    support: int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <http://en.wikipedia.org/wiki/Precision_and_recall>`_

    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_

    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`

    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    ... # doctest: +ELLIPSIS
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)

    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:
    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    (array([ 0. ,  0. ,  0.66...]),
     array([ 0.,  0.,  1.]),
     array([ 0. ,  0. ,  0.8]),
     array([2, 2, 2]))
    """
    # Validate the averaging mode and beta before touching the data.
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)
    # Deprecation shims: pre-0.18 the implicit default for non-binary input
    # was 'weighted'; emulate that here while warning the caller.
    if average == 'binary' and (y_type != 'binary' or pos_label is None):
        warnings.warn('The default `weighted` averaging is deprecated, '
                      'and from version 0.18, use of precision, recall or '
                      'F-score with multiclass or multilabel data or '
                      'pos_label=None will result in an exception. '
                      'Please set an explicit value for `average`, one of '
                      '%s. In cross validation use, for instance, '
                      'scoring="f1_weighted" instead of scoring="f1".'
                      % str(average_options), DeprecationWarning, stacklevel=2)
        average = 'weighted'
    if y_type == 'binary' and pos_label is not None and average is not None:
        if average != 'binary':
            warnings.warn('From version 0.18, binary input will not be '
                          'handled specially when using averaged '
                          'precision/recall/F-score. '
                          'Please use average=\'binary\' to report only the '
                          'positive class performance.', DeprecationWarning)
        if labels is None or len(labels) <= 2:
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r" %
                                     (pos_label, present_labels))
            # Binary reporting: restrict the computation to the positive class.
            labels = [pos_label]
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        # Selected labels first, then any remaining present labels; the
        # extras are needed for correct micro/weighted denominators and are
        # dropped again further down via labels[:n_labels].
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])
    # Calculate tp_sum, pred_sum, true_sum ###
    if y_type.startswith('multilabel'):
        # axis 0 sums per label (column); axis 1 sums per sample (row).
        sum_axis = 1 if average == 'samples' else 0
        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]
        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)
    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        # Multiclass path: encode labels as 0..n-1 so bincount can be used.
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_
        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None
        if len(tp_bins):
            tp_sum = bincount(tp_bins, weights=tp_bins_weights,
                              minlength=len(labels))
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = bincount(y_pred, weights=sample_weight,
                                minlength=len(labels))
        if len(y_true):
            true_sum = bincount(y_true, weights=sample_weight,
                                minlength=len(labels))
        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]
    if average == 'micro':
        # Pool the counts over all labels into a single bin.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:
        # Oddly, we may get an "invalid" rather than a "divide" error
        # here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0
    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support
    return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives, i.e. the
    ability of the classifier not to label as positive a sample that is
    negative.  The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  For multilabel targets, labels are
        column indices.  By default, all labels in ``y_true`` and ``y_pred``
        are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``.  Until version 0.18 it
        is necessary to set ``pos_label=None`` if seeking to use another
        averaging method over binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
              'samples', 'weighted']
        Required for multiclass/multilabel targets; see
        ``precision_recall_fscore_support`` for the meaning of each option.
        If ``None``, the scores for each class are returned.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or weighted
        average of the precision of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> precision_score(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average=None)  # doctest: +ELLIPSIS
    array([ 0.66...,  0.        ,  0.        ])
    """
    # Delegate to the shared implementation; restrict ill-defined-metric
    # warnings to precision so callers are not flooded with messages about
    # metrics they did not ask for.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('precision',),
                                             sample_weight=sample_weight)
    return scores[0]
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives, i.e. the ability
    of the classifier to find all the positive samples.  The best value is 1
    and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  For multilabel targets, labels are
        column indices.  By default, all labels in ``y_true`` and ``y_pred``
        are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``.  Until version 0.18 it
        is necessary to set ``pos_label=None`` if seeking to use another
        averaging method over binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
              'samples', 'weighted']
        Required for multiclass/multilabel targets; see
        ``precision_recall_fscore_support`` for the meaning of each option.
        If ``None``, the scores for each class are returned.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average=None)
    array([ 1.,  0.,  0.])
    """
    # Delegate to the shared implementation; restrict ill-defined-metric
    # warnings to recall only.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('recall',),
                                             sample_weight=sample_weight)
    return scores[1]
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.

    target_names : list of strings
        Optional display names matching the labels (same order).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    digits : int
        Number of digits for formatting output floating point values

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class.

    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.50      1.00      0.67         1
        class 1       0.00      0.00      0.00         1
        class 2       1.00      0.67      0.80         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.61         5
    <BLANKLINE>
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    last_line_heading = 'avg / total'
    # The first column is sized to fit the longest class name (or the
    # averages heading, whichever is wider).
    if target_names is None:
        width = len(last_line_heading)
        target_names = ['%s' % l for l in labels]
    else:
        width = max(len(cn) for cn in target_names)
        width = max(width, len(last_line_heading), digits)
    headers = ["precision", "recall", "f1-score", "support"]
    # Build a single %-format template reused for the header row, one row
    # per label, and the trailing averages row.
    fmt = '%% %ds' % width  # first column: class name
    fmt += '  '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'
    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'
    # Per-label metrics (average=None keeps one entry per label).
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)
    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["{0:0.{1}f}".format(v, digits)]
        values += ["{0}".format(s[i])]
        report += fmt % tuple(values)
    report += '\n'
    # compute averages
    # The final row is the support-weighted average of each metric.
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["{0:0.{1}f}".format(v, digits)]
    values += ['{0}'.format(np.sum(s))]
    report += fmt % tuple(values)
    return report
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    classes : array, shape = [n_labels], optional
        Integer array of labels.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        Return the average Hamming loss between element of ``y_true`` and
        ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss

    Notes
    -----
    In multiclass classification, the Hamming loss corresponds to the
    Hamming distance between ``y_true`` and ``y_pred``, which is equivalent
    to the subset ``zero_one_loss`` function.

    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss: the zero-one loss considers the entire label set
    of a sample incorrect unless it matches exactly, whereas the Hamming
    loss penalizes individual labels.  The Hamming loss is upperbounded by
    the subset zero-one loss and, when normalized over samples, always lies
    between 0 and 1.

    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
           An Overview. International Journal of Data Warehousing & Mining,
           3(3), 1-13, July-September 2007.

    .. [2] `Wikipedia entry on the Hamming distance
           <http://en.wikipedia.org/wiki/Hamming_distance>`_

    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25

    In the multilabel case with binary label indicators:

    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
    0.75
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    classes = (unique_labels(y_true, y_pred) if classes is None
               else np.asarray(classes))
    # Mean weight normalizes the weighted count in the multilabel branch.
    weight_average = 1. if sample_weight is None else np.mean(sample_weight)
    if y_type.startswith('multilabel'):
        # Count disagreeing (sample, label) entries, then normalize by the
        # total number of entries (scaled by the average weight).
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        denom = y_true.shape[0] * len(classes) * weight_average
        return n_differences / denom
    if y_type in ["binary", "multiclass"]:
        # Single-label case: the Hamming loss is just the error rate.
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions. For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1, the log loss is

        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))

    Read more in the :ref:`User Guide <log_loss>`.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.

    y_pred : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).

    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...

    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    # One-hot encode the true labels; a single output column means binary
    # input, so expand it to the two-column [1 - T, T] form.
    lb = LabelBinarizer()
    T = lb.fit_transform(y_true)
    if T.shape[1] == 1:
        T = np.append(1 - T, T, axis=1)
    # Clipping
    Y = np.clip(y_pred, eps, 1 - eps)
    # This happens in cases when elements in y_pred have type "str".
    if not isinstance(Y, np.ndarray):
        raise ValueError("y_pred should be an array of floats.")
    # If y_pred is of single dimension, assume y_true to be binary
    # and then check.
    if Y.ndim == 1:
        Y = Y[:, np.newaxis]
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)
    # Check if dimensions are consistent.
    check_consistent_length(T, Y)
    T = check_array(T)
    Y = check_array(Y)
    if T.shape[1] != Y.shape[1]:
        raise ValueError("y_true and y_pred have different number of classes "
                         "%d, %d" % (T.shape[1], Y.shape[1]))
    # Renormalize
    # Clipping may have left rows that no longer sum to one; rescale each
    # row so the cross-entropy is computed over a valid distribution.
    Y /= Y.sum(axis=1)[:, np.newaxis]
    loss = -(T * np.log(Y)).sum(axis=1)
    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized)

    In binary class case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1.  The cumulated hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.

    In multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multilabel margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
    is an upper bound of the number of mistakes made by the classifier.

    Read more in the :ref:`User Guide <hinge_loss>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive label
        must be greater than the negative label.

    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).

    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge loss.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <http://en.wikipedia.org/wiki/Hinge_loss>`_

    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292

    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> est.fit(X, y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> pred_decision  # doctest: +ELLIPSIS
    array([-2.18...,  2.36...,  0.09...])
    >>> hinge_loss([-1, 1, 1], pred_decision)  # doctest: +ELLIPSIS
    0.30...

    In the multiclass case:

    >>> X = np.array([[0], [1], [2], [3]])
    >>> Y = np.array([0, 1, 2, 3])
    >>> labels = np.array([0, 1, 2, 3])
    >>> est = svm.LinearSVC()
    >>> est.fit(X, Y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-1], [2], [3]])
    >>> y_true = [0, 2, 3]
    >>> hinge_loss(y_true, pred_decision, labels)  #doctest: +ELLIPSIS
    0.56...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)
    if y_true_unique.size > 2:
        # Multiclass (Crammer-Singer) path: needs one decision column per
        # label, so the label set must be complete.
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        # mask is False exactly at each sample's true-class column; the
        # margin is the true-class score minus the best competing score.
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)
        lbin = LabelBinarizer(neg_label=-1)
        y_true = lbin.fit_transform(y_true)[:, 0]
        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")
    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Validate a binary target / probability pair.

    Checks that ``y_true`` contains exactly two distinct labels and that
    every value of ``y_prob`` lies in [0, 1], then returns ``y_true``
    binarized to a 1d 0/1 array.
    """
    check_consistent_length(y_true, y_prob)
    observed = np.unique(y_true)
    if observed.size != 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % observed)
    # Probabilities must lie within the unit interval.
    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")
    # Map the two observed labels onto {0, 1} and flatten to 1d.
    return label_binarize(y_true, observed)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score for binary targets.

    The Brier score is the mean squared difference between the predicted
    probability assigned to the positive class and the actual outcome
    (encoded as 0 or 1).  It always lies between zero and one, and smaller
    values indicate better-calibrated predictions, hence the naming with
    "loss".  It is appropriate for binary outcomes only; ordinal problems
    with three or more levels are out of scope because the score treats
    all outcomes as equally "distant" from one another.

    Which label counts as positive is controlled by ``pos_label``; when it
    is None the maximum label is used.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.

    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    pos_label : int (default: None)
        Label of the positive class. If None, the maximum label is used as
        positive class

    Returns
    -------
    score : float
        Brier score

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true_categorical, y_prob, \
                         pos_label="ham")  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
    0.0

    References
    ----------
    http://en.wikipedia.org/wiki/Brier_score
    """
    targets = column_or_1d(y_true)
    probabilities = column_or_1d(y_prob)
    # Default positive label: the largest label present in y_true.
    if pos_label is None:
        pos_label = targets.max()
    # Reduce to a 0/1 indicator for the positive class, then validate the
    # probabilities (length match, binary targets, values within [0, 1]).
    outcomes = np.array(targets == pos_label, int)
    outcomes = _check_binary_probabilistic_predictions(outcomes, probabilities)
    return np.average((outcomes - probabilities) ** 2, weights=sample_weight)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/arrays/categorical/test_constructors.py | 2 | 28825 | from datetime import date, datetime
import numpy as np
import pytest
from pandas.compat import IS64, is_platform_windows
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
class TestCategoricalConstructors:
    """Tests for the ``pd.Categorical`` constructor and its alternate
    constructors (``from_codes``, ``_from_inferred_categories``,
    ``_from_sequence``) across input types, dtypes, ordering and
    edge cases (empty, NaN/NaT, tuples, intervals, datetimelikes)."""
    def test_categorical_scalar_deprecated(self):
        # GH#38433
        with tm.assert_produces_warning(FutureWarning):
            Categorical("A", categories=["A", "B"])
    def test_validate_ordered(self):
        # see gh-14058
        exp_msg = "'ordered' must either be 'True' or 'False'"
        exp_err = TypeError
        # This should be a boolean.
        ordered = np.array([0, 1, 2])
        with pytest.raises(exp_err, match=exp_msg):
            Categorical([1, 2, 3], ordered=ordered)
        with pytest.raises(exp_err, match=exp_msg):
            Categorical.from_codes(
                [0, 0, 1], categories=["a", "b", "c"], ordered=ordered
            )
    def test_constructor_empty(self):
        # GH 17248
        c = Categorical([])
        expected = Index([])
        tm.assert_index_equal(c.categories, expected)
        c = Categorical([], categories=[1, 2, 3])
        expected = pd.Int64Index([1, 2, 3])
        tm.assert_index_equal(c.categories, expected)
    def test_constructor_empty_boolean(self):
        # see gh-22702
        cat = Categorical([], categories=[True, False])
        categories = sorted(cat.categories.tolist())
        assert categories == [False, True]
    def test_constructor_tuples(self):
        values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
        result = Categorical(values)
        expected = Index([(1,), (1, 2)], tupleize_cols=False)
        tm.assert_index_equal(result.categories, expected)
        assert result.ordered is False
    def test_constructor_tuples_datetimes(self):
        # numpy will auto reshape when all of the tuples are the
        # same len, so add an extra one with 2 items and slice it off
        values = np.array(
            [
                (Timestamp("2010-01-01"),),
                (Timestamp("2010-01-02"),),
                (Timestamp("2010-01-01"),),
                (Timestamp("2010-01-02"),),
                ("a", "b"),
            ],
            dtype=object,
        )[:-1]
        result = Categorical(values)
        expected = Index(
            [(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)],
            tupleize_cols=False,
        )
        tm.assert_index_equal(result.categories, expected)
    def test_constructor_unsortable(self):
        # it works!
        arr = np.array([1, 2, 3, datetime.now()], dtype="O")
        factor = Categorical(arr, ordered=False)
        assert not factor.ordered
        # this however will raise as cannot be sorted
        msg = (
            "'values' is not ordered, please explicitly specify the "
            "categories order by passing in a categories argument."
        )
        with pytest.raises(TypeError, match=msg):
            Categorical(arr, ordered=True)
    def test_constructor_interval(self):
        result = Categorical(
            [Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True
        )
        ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
        exp = Categorical(ii, ordered=True)
        tm.assert_categorical_equal(result, exp)
        tm.assert_index_equal(result.categories, ii)
    def test_constructor(self):
        # Broad coverage of the main constructor: dtype inference,
        # explicit categories, Categorical/Series inputs, corner cases.
        exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
        c1 = Categorical(exp_arr)
        tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
        c2 = Categorical(exp_arr, categories=["a", "b", "c"])
        tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
        c2 = Categorical(exp_arr, categories=["c", "b", "a"])
        tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
        # categories must be unique
        msg = "Categorical categories must be unique"
        with pytest.raises(ValueError, match=msg):
            Categorical([1, 2], [1, 2, 2])
        with pytest.raises(ValueError, match=msg):
            Categorical(["a", "b"], ["a", "b", "b"])
        # The default should be unordered
        c1 = Categorical(["a", "b", "c", "a"])
        assert not c1.ordered
        # Categorical as input
        c1 = Categorical(["a", "b", "c", "a"])
        c2 = Categorical(c1)
        tm.assert_categorical_equal(c1, c2)
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
        c2 = Categorical(c1)
        tm.assert_categorical_equal(c1, c2)
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
        c2 = Categorical(c1)
        tm.assert_categorical_equal(c1, c2)
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
        c2 = Categorical(c1, categories=["a", "b", "c"])
        tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
        tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
        # Series of dtype category
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
        c2 = Categorical(Series(c1))
        tm.assert_categorical_equal(c1, c2)
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
        c2 = Categorical(Series(c1))
        tm.assert_categorical_equal(c1, c2)
        # Series
        c1 = Categorical(["a", "b", "c", "a"])
        c2 = Categorical(Series(["a", "b", "c", "a"]))
        tm.assert_categorical_equal(c1, c2)
        c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
        c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
        tm.assert_categorical_equal(c1, c2)
        # This should result in integer categories, not float!
        cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        assert is_integer_dtype(cat.categories)
        # https://github.com/pandas-dev/pandas/issues/3678
        cat = Categorical([np.nan, 1, 2, 3])
        assert is_integer_dtype(cat.categories)
        # this should result in floats
        cat = Categorical([np.nan, 1, 2.0, 3])
        assert is_float_dtype(cat.categories)
        cat = Categorical([np.nan, 1.0, 2.0, 3.0])
        assert is_float_dtype(cat.categories)
        # This doesn't work -> this would probably need some kind of "remember
        # the original type" feature to try to cast the array interface result
        # to...
        # vals = np.asarray(cat[cat.notna()])
        # assert is_integer_dtype(vals)
        # corner cases
        cat = Categorical([1])
        assert len(cat.categories) == 1
        assert cat.categories[0] == 1
        assert len(cat.codes) == 1
        assert cat.codes[0] == 0
        cat = Categorical(["a"])
        assert len(cat.categories) == 1
        assert cat.categories[0] == "a"
        assert len(cat.codes) == 1
        assert cat.codes[0] == 0
        with tm.assert_produces_warning(FutureWarning):
            # GH#38433
            cat = Categorical(1)
        assert len(cat.categories) == 1
        assert cat.categories[0] == 1
        assert len(cat.codes) == 1
        assert cat.codes[0] == 0
        # two arrays
        # - when the first is an integer dtype and the second is not
        # - when the resulting codes are all -1/NaN
        with tm.assert_produces_warning(None):
            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
        with tm.assert_produces_warning(None):
            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) # noqa
        # the next one are from the old docs
        with tm.assert_produces_warning(None):
            c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
            cat = Categorical([1, 2], categories=[1, 2, 3])
        # this is a legitimate constructor
        with tm.assert_produces_warning(None):
            c = Categorical( # noqa
                np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
            )
    def test_constructor_with_existing_categories(self):
        # GH25318: constructing with pd.Series used to bogusly skip recoding
        # categories
        c0 = Categorical(["a", "b", "c", "a"])
        c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
        c2 = Categorical(c0, categories=c1.categories)
        tm.assert_categorical_equal(c1, c2)
        c3 = Categorical(Series(c0), categories=c1.categories)
        tm.assert_categorical_equal(c1, c3)
    def test_constructor_not_sequence(self):
        # https://github.com/pandas-dev/pandas/issues/16022
        msg = r"^Parameter 'categories' must be list-like, was"
        with pytest.raises(TypeError, match=msg):
            Categorical(["a", "b"], categories="a")
    def test_constructor_with_null(self):
        # Cannot have NaN in categories
        msg = "Categorical categories cannot be null"
        with pytest.raises(ValueError, match=msg):
            Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"])
        with pytest.raises(ValueError, match=msg):
            Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"])
        with pytest.raises(ValueError, match=msg):
            Categorical(
                DatetimeIndex(["nat", "20160101"]),
                categories=[NaT, Timestamp("20160101")],
            )
    def test_constructor_with_index(self):
        ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
        tm.assert_categorical_equal(ci.values, Categorical(ci))
        ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
        tm.assert_categorical_equal(
            ci.values, Categorical(ci.astype(object), categories=ci.categories)
        )
    def test_constructor_with_generator(self):
        # This was raising an Error in isna(single_val).any() because isna
        # returned a scalar for a generator
        exp = Categorical([0, 1, 2])
        cat = Categorical(x for x in [0, 1, 2])
        tm.assert_categorical_equal(cat, exp)
        cat = Categorical(range(3))
        tm.assert_categorical_equal(cat, exp)
        MultiIndex.from_product([range(5), ["a", "b", "c"]])
        # check that categories accept generators and sequences
        cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
        tm.assert_categorical_equal(cat, exp)
        cat = Categorical([0, 1, 2], categories=range(3))
        tm.assert_categorical_equal(cat, exp)
    def test_constructor_with_rangeindex(self):
        # RangeIndex is preserved in Categories
        rng = Index(range(3))
        cat = Categorical(rng)
        tm.assert_index_equal(cat.categories, rng, exact=True)
        cat = Categorical([1, 2, 0], categories=rng)
        tm.assert_index_equal(cat.categories, rng, exact=True)
    @pytest.mark.parametrize(
        "dtl",
        [
            date_range("1995-01-01 00:00:00", periods=5, freq="s"),
            date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"),
            timedelta_range("1 day", periods=5, freq="s"),
        ],
    )
    def test_constructor_with_datetimelike(self, dtl):
        # see gh-12077
        # constructor with a datetimelike and NaT
        s = Series(dtl)
        c = Categorical(s)
        expected = type(dtl)(s)
        expected._data.freq = None
        tm.assert_index_equal(c.categories, expected)
        tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
        # with NaT
        s2 = s.copy()
        s2.iloc[-1] = NaT
        c = Categorical(s2)
        expected = type(dtl)(s2.dropna())
        expected._data.freq = None
        tm.assert_index_equal(c.categories, expected)
        exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
        tm.assert_numpy_array_equal(c.codes, exp)
        result = repr(c)
        assert "NaT" in result
    def test_constructor_from_index_series_datetimetz(self):
        idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
        idx = idx._with_freq(None) # freq not preserved in result.categories
        result = Categorical(idx)
        tm.assert_index_equal(result.categories, idx)
        result = Categorical(Series(idx))
        tm.assert_index_equal(result.categories, idx)
    def test_constructor_date_objects(self):
        # we dont cast date objects to timestamps, matching Index constructor
        v = date.today()
        cat = Categorical([v, v])
        assert cat.categories.dtype == object
        assert type(cat.categories[0]) is date
    def test_constructor_from_index_series_timedelta(self):
        idx = timedelta_range("1 days", freq="D", periods=3)
        idx = idx._with_freq(None) # freq not preserved in result.categories
        result = Categorical(idx)
        tm.assert_index_equal(result.categories, idx)
        result = Categorical(Series(idx))
        tm.assert_index_equal(result.categories, idx)
    def test_constructor_from_index_series_period(self):
        idx = period_range("2015-01-01", freq="D", periods=3)
        result = Categorical(idx)
        tm.assert_index_equal(result.categories, idx)
        result = Categorical(Series(idx))
        tm.assert_index_equal(result.categories, idx)
    @pytest.mark.parametrize(
        "values",
        [
            np.array([1.0, 1.2, 1.8, np.nan]),
            np.array([1, 2, 3], dtype="int64"),
            ["a", "b", "c", np.nan],
            [pd.Period("2014-01"), pd.Period("2014-02"), NaT],
            [Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT],
            [
                Timestamp("2014-01-01", tz="US/Eastern"),
                Timestamp("2014-01-02", tz="US/Eastern"),
                NaT,
            ],
        ],
    )
    def test_constructor_invariant(self, values):
        # GH 14190
        c = Categorical(values)
        c2 = Categorical(c)
        tm.assert_categorical_equal(c, c2)
    @pytest.mark.parametrize("ordered", [True, False])
    def test_constructor_with_dtype(self, ordered):
        categories = ["b", "a", "c"]
        dtype = CategoricalDtype(categories, ordered=ordered)
        result = Categorical(["a", "b", "a", "c"], dtype=dtype)
        expected = Categorical(
            ["a", "b", "a", "c"], categories=categories, ordered=ordered
        )
        tm.assert_categorical_equal(result, expected)
        assert result.ordered is ordered
    def test_constructor_dtype_and_others_raises(self):
        dtype = CategoricalDtype(["a", "b"], ordered=True)
        msg = "Cannot specify `categories` or `ordered` together with `dtype`."
        with pytest.raises(ValueError, match=msg):
            Categorical(["a", "b"], categories=["a", "b"], dtype=dtype)
        with pytest.raises(ValueError, match=msg):
            Categorical(["a", "b"], ordered=True, dtype=dtype)
        with pytest.raises(ValueError, match=msg):
            Categorical(["a", "b"], ordered=False, dtype=dtype)
    @pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]])
    @pytest.mark.parametrize("ordered", [True, False])
    def test_constructor_str_category(self, categories, ordered):
        result = Categorical(
            ["a", "b"], categories=categories, ordered=ordered, dtype="category"
        )
        expected = Categorical(["a", "b"], categories=categories, ordered=ordered)
        tm.assert_categorical_equal(result, expected)
    def test_constructor_str_unknown(self):
        with pytest.raises(ValueError, match="Unknown dtype"):
            Categorical([1, 2], dtype="foo")
    def test_constructor_np_strs(self):
        # GH#31499 Hastable.map_locations needs to work on np.str_ objects
        cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
        assert all(isinstance(x, np.str_) for x in cat.categories)
    def test_constructor_from_categorical_with_dtype(self):
        dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
        values = Categorical(["a", "b", "d"])
        result = Categorical(values, dtype=dtype)
        # We use dtype.categories, not values.categories
        expected = Categorical(
            ["a", "b", "d"], categories=["a", "b", "c"], ordered=True
        )
        tm.assert_categorical_equal(result, expected)
    def test_constructor_from_categorical_with_unknown_dtype(self):
        dtype = CategoricalDtype(None, ordered=True)
        values = Categorical(["a", "b", "d"])
        result = Categorical(values, dtype=dtype)
        # We use values.categories, not dtype.categories
        expected = Categorical(
            ["a", "b", "d"], categories=["a", "b", "d"], ordered=True
        )
        tm.assert_categorical_equal(result, expected)
    def test_constructor_from_categorical_string(self):
        values = Categorical(["a", "b", "d"])
        # use categories, ordered
        result = Categorical(
            values, categories=["a", "b", "c"], ordered=True, dtype="category"
        )
        expected = Categorical(
            ["a", "b", "d"], categories=["a", "b", "c"], ordered=True
        )
        tm.assert_categorical_equal(result, expected)
        # No string
        result = Categorical(values, categories=["a", "b", "c"], ordered=True)
        tm.assert_categorical_equal(result, expected)
    def test_constructor_with_categorical_categories(self):
        # GH17884
        expected = Categorical(["a", "b"], categories=["a", "b", "c"])
        result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"]))
        tm.assert_categorical_equal(result, expected)
        result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"]))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list])
    def test_construction_with_null(self, klass, nulls_fixture):
        # https://github.com/pandas-dev/pandas/issues/31927
        values = klass(["a", nulls_fixture, "b"])
        result = Categorical(values)
        dtype = CategoricalDtype(["a", "b"])
        codes = [0, -1, 1]
        expected = Categorical.from_codes(codes=codes, dtype=dtype)
        tm.assert_categorical_equal(result, expected)
    def test_from_codes_empty(self):
        cat = ["a", "b", "c"]
        result = Categorical.from_codes([], categories=cat)
        expected = Categorical([], categories=cat)
        tm.assert_categorical_equal(result, expected)
    def test_from_codes_too_few_categories(self):
        dtype = CategoricalDtype(categories=[1, 2])
        msg = "codes need to be between "
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes([1, 2], categories=dtype.categories)
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes([1, 2], dtype=dtype)
    def test_from_codes_non_int_codes(self):
        dtype = CategoricalDtype(categories=[1, 2])
        msg = "codes need to be array-like integers"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(["a"], categories=dtype.categories)
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(["a"], dtype=dtype)
    def test_from_codes_non_unique_categories(self):
        with pytest.raises(ValueError, match="Categorical categories must be unique"):
            Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
    def test_from_codes_nan_cat_included(self):
        with pytest.raises(ValueError, match="Categorical categories cannot be null"):
            Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
    def test_from_codes_too_negative(self):
        dtype = CategoricalDtype(categories=["a", "b", "c"])
        msg = r"codes need to be between -1 and len\(categories\)-1"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes([-2, 1, 2], dtype=dtype)
    def test_from_codes(self):
        dtype = CategoricalDtype(categories=["a", "b", "c"])
        exp = Categorical(["a", "b", "c"], ordered=False)
        res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
        tm.assert_categorical_equal(exp, res)
        res = Categorical.from_codes([0, 1, 2], dtype=dtype)
        tm.assert_categorical_equal(exp, res)
    @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
    def test_from_codes_with_categorical_categories(self, klass):
        # GH17884
        expected = Categorical(["a", "b"], categories=["a", "b", "c"])
        result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"]))
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
    def test_from_codes_with_non_unique_categorical_categories(self, klass):
        with pytest.raises(ValueError, match="Categorical categories must be unique"):
            Categorical.from_codes([0, 1], klass(["a", "b", "a"]))
    def test_from_codes_with_nan_code(self):
        # GH21767
        codes = [1, 2, np.nan]
        dtype = CategoricalDtype(categories=["a", "b", "c"])
        with pytest.raises(ValueError, match="codes need to be array-like integers"):
            Categorical.from_codes(codes, categories=dtype.categories)
        with pytest.raises(ValueError, match="codes need to be array-like integers"):
            Categorical.from_codes(codes, dtype=dtype)
    @pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]])
    def test_from_codes_with_float(self, codes):
        # GH21767
        # float codes should raise even if values are equal to integers
        dtype = CategoricalDtype(categories=["a", "b", "c"])
        msg = "codes need to be array-like integers"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(codes, dtype.categories)
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(codes, dtype=dtype)
    def test_from_codes_with_dtype_raises(self):
        msg = "Cannot specify"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(
                [0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"])
            )
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(
                [0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"])
            )
    def test_from_codes_neither(self):
        msg = "Both were None"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes([0, 1])
    def test_from_codes_with_nullable_int(self):
        codes = pd.array([0, 1], dtype="Int64")
        categories = ["a", "b"]
        result = Categorical.from_codes(codes, categories=categories)
        expected = Categorical.from_codes(codes.to_numpy(int), categories=categories)
        tm.assert_categorical_equal(result, expected)
    def test_from_codes_with_nullable_int_na_raises(self):
        codes = pd.array([0, None], dtype="Int64")
        categories = ["a", "b"]
        msg = "codes cannot contain NA values"
        with pytest.raises(ValueError, match=msg):
            Categorical.from_codes(codes, categories=categories)
    @pytest.mark.parametrize("dtype", [None, "category"])
    def test_from_inferred_categories(self, dtype):
        cats = ["a", "b"]
        codes = np.array([0, 0, 1, 1], dtype="i8")
        result = Categorical._from_inferred_categories(cats, codes, dtype)
        expected = Categorical.from_codes(codes, cats)
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("dtype", [None, "category"])
    def test_from_inferred_categories_sorts(self, dtype):
        cats = ["b", "a"]
        codes = np.array([0, 1, 1, 1], dtype="i8")
        result = Categorical._from_inferred_categories(cats, codes, dtype)
        expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"])
        tm.assert_categorical_equal(result, expected)
    def test_from_inferred_categories_dtype(self):
        cats = ["a", "b", "d"]
        codes = np.array([0, 1, 0, 2], dtype="i8")
        dtype = CategoricalDtype(["c", "b", "a"], ordered=True)
        result = Categorical._from_inferred_categories(cats, codes, dtype)
        expected = Categorical(
            ["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True
        )
        tm.assert_categorical_equal(result, expected)
    def test_from_inferred_categories_coerces(self):
        cats = ["1", "2", "bad"]
        codes = np.array([0, 0, 1, 2], dtype="i8")
        dtype = CategoricalDtype([1, 2])
        result = Categorical._from_inferred_categories(cats, codes, dtype)
        expected = Categorical([1, 1, 2, np.nan])
        tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("ordered", [None, True, False])
    def test_construction_with_ordered(self, ordered):
        # GH 9347, 9190
        cat = Categorical([0, 1, 2], ordered=ordered)
        assert cat.ordered == bool(ordered)
    @pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
    def test_constructor_imaginary(self):
        values = [1, 2, 3 + 1j]
        c1 = Categorical(values)
        tm.assert_index_equal(c1.categories, Index(values))
        tm.assert_numpy_array_equal(np.array(c1), np.array(values))
    def test_constructor_string_and_tuples(self):
        # GH 21416
        c = Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
        expected_index = Index([("a", "b"), ("b", "a"), "c"])
        assert c.categories.equals(expected_index)
    def test_interval(self):
        idx = pd.interval_range(0, 10, periods=10)
        cat = Categorical(idx, categories=idx)
        expected_codes = np.arange(10, dtype="int8")
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
        # infer categories
        cat = Categorical(idx)
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
        # list values
        cat = Categorical(list(idx))
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
        # list values, categories
        cat = Categorical(list(idx), categories=list(idx))
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
        # shuffled
        values = idx.take([1, 2, 0])
        cat = Categorical(values, categories=idx)
        tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8"))
        tm.assert_index_equal(cat.categories, idx)
        # extra
        values = pd.interval_range(8, 11, periods=3)
        cat = Categorical(values, categories=idx)
        expected_codes = np.array([8, 9, -1], dtype="int8")
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
        # overlapping
        idx = IntervalIndex([Interval(0, 2), Interval(0, 1)])
        cat = Categorical(idx, categories=idx)
        expected_codes = np.array([0, 1], dtype="int8")
        tm.assert_numpy_array_equal(cat.codes, expected_codes)
        tm.assert_index_equal(cat.categories, idx)
    def test_categorical_extension_array_nullable(self, nulls_fixture):
        # GH:
        arr = pd.arrays.StringArray._from_sequence([nulls_fixture] * 2)
        result = Categorical(arr)
        expected = Categorical(Series([pd.NA, pd.NA], dtype="object"))
        tm.assert_categorical_equal(result, expected)
    def test_from_sequence_copy(self):
        cat = Categorical(np.arange(5).repeat(2))
        result = Categorical._from_sequence(cat, dtype=None, copy=False)
        # more generally, we'd be OK with a view
        assert result._codes is cat._codes
        result = Categorical._from_sequence(cat, dtype=None, copy=True)
        assert not np.shares_memory(result._codes, cat._codes)
    @pytest.mark.xfail(
        not IS64 or is_platform_windows(),
        reason="Incorrectly raising in ensure_datetime64ns",
    )
    def test_constructor_datetime64_non_nano(self):
        categories = np.arange(10).view("M8[D]")
        values = categories[::2].copy()
        cat = Categorical(values, categories=categories)
        assert (cat == values).all()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexing/common.py | 7 | 9615 | """ common utilities """
import itertools
from warnings import catch_warnings
import numpy as np
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
from pandas import Series, DataFrame, Panel, date_range, UInt64Index
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
# Module-level debug switch: when True, check_result prints a line per
# comparison via pprint_thing.
_verbose = False
def _mklbl(prefix, n):
return ["%s%s" % (prefix, i) for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base(object):
    """Comprehensive indexing test base class.

    ``setup_method`` builds one fixture per (object kind, index flavor)
    combination and groups them into per-kind dicts (``self.series``,
    ``self.frame``, ``self.panel``); ``check_result`` compares two indexing
    methods across those fixtures.
    """
    # object kinds and index/dtype flavors for which fixtures are built
    _objs = set(['series', 'frame', 'panel'])
    _typs = set(['ints', 'uints', 'labels', 'mixed',
                 'ts', 'floats', 'empty', 'ts_rev'])
    def setup_method(self, method):
        # Panel construction is wrapped in catch_warnings to silence its
        # deprecation warning.
        self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
        self.frame_ints = DataFrame(np.random.randn(4, 4),
                                    index=lrange(0, 8, 2),
                                    columns=lrange(0, 12, 3))
        with catch_warnings(record=True):
            self.panel_ints = Panel(np.random.rand(4, 4, 4),
                                    items=lrange(0, 8, 2),
                                    major_axis=lrange(0, 12, 3),
                                    minor_axis=lrange(0, 16, 4))
        self.series_uints = Series(np.random.rand(4),
                                   index=UInt64Index(lrange(0, 8, 2)))
        self.frame_uints = DataFrame(np.random.randn(4, 4),
                                     index=UInt64Index(lrange(0, 8, 2)),
                                     columns=UInt64Index(lrange(0, 12, 3)))
        with catch_warnings(record=True):
            self.panel_uints = Panel(np.random.rand(4, 4, 4),
                                     items=UInt64Index(lrange(0, 8, 2)),
                                     major_axis=UInt64Index(lrange(0, 12, 3)),
                                     minor_axis=UInt64Index(lrange(0, 16, 4)))
        self.series_labels = Series(np.random.randn(4), index=list('abcd'))
        self.frame_labels = DataFrame(np.random.randn(4, 4),
                                      index=list('abcd'), columns=list('ABCD'))
        with catch_warnings(record=True):
            self.panel_labels = Panel(np.random.randn(4, 4, 4),
                                      items=list('abcd'),
                                      major_axis=list('ABCD'),
                                      minor_axis=list('ZYXW'))
        self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
        self.frame_mixed = DataFrame(np.random.randn(4, 4),
                                     index=[2, 4, 'null', 8])
        with catch_warnings(record=True):
            self.panel_mixed = Panel(np.random.randn(4, 4, 4),
                                     items=[2, 4, 'null', 8])
        self.series_ts = Series(np.random.randn(4),
                                index=date_range('20130101', periods=4))
        self.frame_ts = DataFrame(np.random.randn(4, 4),
                                  index=date_range('20130101', periods=4))
        with catch_warnings(record=True):
            self.panel_ts = Panel(np.random.randn(4, 4, 4),
                                  items=date_range('20130101', periods=4))
        dates_rev = (date_range('20130101', periods=4)
                     .sort_values(ascending=False))
        self.series_ts_rev = Series(np.random.randn(4),
                                    index=dates_rev)
        self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
                                      index=dates_rev)
        with catch_warnings(record=True):
            self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
                                      items=dates_rev)
        self.frame_empty = DataFrame({})
        self.series_empty = Series({})
        with catch_warnings(record=True):
            self.panel_empty = Panel({})
        # form agglomerates
        for o in self._objs:
            d = dict()
            for t in self._typs:
                d[t] = getattr(self, '%s_%s' % (o, t), None)
            setattr(self, o, d)
    def generate_indices(self, f, values=False):
        """ generate the indices

        if values is True, use the axis values
        if False, use the range
        """
        axes = f.axes
        if values:
            axes = [lrange(len(a)) for a in axes]
        return itertools.product(*axes)
    def get_result(self, obj, method, key, axis):
        """ return the result for this obj with this key and this axis """
        if isinstance(key, dict):
            key = key[axis]
        # use an artificial conversion to map the key as integers to the labels
        # so ix can work for comparisons
        if method == 'indexer':
            method = 'ix'
            key = obj._get_axis(axis)[key]
        # in case we actually want 0 index slicing
        # NOTE(review): bare except deliberately falls back to plain key
        # access when the axified tuple lookup fails for any reason
        try:
            with catch_warnings(record=True):
                xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
        except:
            xp = getattr(obj, method).__getitem__(key)
        return xp
    def get_value(self, f, i, values=False):
        """ return the value for the location i """
        # check against values
        if values:
            return f.values[i]
        # this is equiv of f[col][row].....
        # v = f
        # for a in reversed(i):
        # v = v.__getitem__(a)
        # return v
        with catch_warnings(record=True):
            return f.ix[i]
    def check_values(self, f, func, values=False):
        # Compare indexing via the accessor named `func` against either the
        # raw .values (values=True) or chained __getitem__ access.
        if f is None:
            return
        axes = f.axes
        indicies = itertools.product(*axes)
        for i in indicies:
            result = getattr(f, func)[i]
            # check against values
            if values:
                expected = f.values[i]
            else:
                expected = f
                for a in reversed(i):
                    expected = expected.__getitem__(a)
            tm.assert_almost_equal(result, expected)
    def check_result(self, name, method1, key1, method2, key2, typs=None,
                     objs=None, axes=None, fails=None):
        # Compare indexing with (method1, key1) against (method2, key2) on
        # every selected fixture; `fails` inverts/allows expected failures.
        def _eq(t, o, a, obj, k1, k2):
            """ compare equal for these 2 keys """
            if a is not None and a > obj.ndim - 1:
                return
            def _print(result, error=None):
                if error is not None:
                    error = str(error)
                v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
                     "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
                     (name, result, t, o, method1, method2, a, error or ''))
                if _verbose:
                    pprint_thing(v)
            try:
                rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
                try:
                    xp = self.get_result(obj, method2, k2, a)
                except:
                    result = 'no comp'
                    _print(result)
                    return
                detail = None
                try:
                    if is_scalar(rs) and is_scalar(xp):
                        assert rs == xp
                    elif xp.ndim == 1:
                        tm.assert_series_equal(rs, xp)
                    elif xp.ndim == 2:
                        tm.assert_frame_equal(rs, xp)
                    elif xp.ndim == 3:
                        tm.assert_panel_equal(rs, xp)
                    result = 'ok'
                except AssertionError as e:
                    detail = str(e)
                    result = 'fail'
                # reverse the checks
                if fails is True:
                    if result == 'fail':
                        result = 'ok (fail)'
                _print(result)
                if not result.startswith('ok'):
                    raise AssertionError(detail)
            except AssertionError:
                raise
            except Exception as detail:
                # if we are in fails, then ok; otherwise raise it
                if fails is not None:
                    if isinstance(detail, fails):
                        result = 'ok (%s)' % type(detail).__name__
                        _print(result)
                        return
                result = type(detail).__name__
                raise AssertionError(_print(result, error=detail))
        if typs is None:
            typs = self._typs
        if objs is None:
            objs = self._objs
        if axes is not None:
            if not isinstance(axes, (tuple, list)):
                axes = [axes]
            else:
                axes = list(axes)
        else:
            axes = [0, 1, 2]
        # check
        for o in objs:
            if o not in self._objs:
                continue
            d = getattr(self, o)
            for a in axes:
                for t in typs:
                    if t not in self._typs:
                        continue
                    obj = d[t]
                    if obj is None:
                        continue
                    def _call(obj=obj):
                        obj = obj.copy()
                        k2 = key2
                        _eq(t, o, a, obj, key1, k2)
                    # Panel deprecations
                    if isinstance(obj, Panel):
                        with catch_warnings(record=True):
                            _call()
                    else:
                        _call()
| apache-2.0 |
RoboticsClubatUCF/RoboSub | ucf_sub_catkin_ros/src/sub_utils/src/color.py | 1 | 2950 | from matplotlib import pyplot as plt
import numpy as np
import argparse
import cv2
from imutils import paths
import imutils
import os
from sklearn.externals import joblib
# Argument Parsing: paths of the raw positive/negative image sets and the
# per-colour output directories where extracted feature vectors are dumped.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--positive", required=True, help="path to positive images directory")
ap.add_argument("-n", "--negative", required=True, help="path to negative images directory")
ap.add_argument("--nf", required = True, help="path to negative feature directory")
ap.add_argument("-r", required = True, help="path to red features directory")
ap.add_argument("-g", required = True, help="path to green features directory")
ap.add_argument("-y", required = True, help="path to yellow features directory")
args = vars(ap.parse_args())
# For every positive image:
#   1. read and resize to 64x64
#   2. compute one 256-bin histogram per colour channel, concatenated
#   3. flatten into a single feature vector
#   4. dump it into the directory matching the buoy colour letter
#      (R/Y/G) found in the image path
# NOTE(review): like the original, the colour letter is matched against
# the full path, not just the file name -- a directory named e.g. "Red"
# would also match; confirm that is intended.
_COLOR_ARG = {"R": "r", "Y": "y", "G": "g"}

for imagePath in paths.list_images(args["positive"]):
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=64)
    image = imutils.resize(image, height=64)
    chans = cv2.split(image)

    # concatenated per-channel colour histogram, flattened to 1-D
    features = []
    for chan in chans:
        hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
        features.extend(hist)
    features = np.asarray(features).flatten()

    fd_name = os.path.split(imagePath)[1].split(".")[0] + ".feat"
    # single routing table replaces the three duplicated if-blocks
    for letter, arg_key in _COLOR_ARG.items():
        if letter in imagePath:
            joblib.dump(features, os.path.join(args[arg_key], fd_name))
# Same feature-extraction pipeline for the negative images; every vector
# is dumped into the single negative-feature directory (args["nf"]).
for imagePath in paths.list_images(args["negative"]):
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=64)
    image = imutils.resize(image, height=64)
    chans = cv2.split(image)
    colors = ("b", "g", "r")
    '''plt.figure()
    plt.title("'Flattened' Color Histogram")
    plt.xlabel("Bins")
    plt.ylabel("# of Pixels")'''
    # concatenated per-channel colour histogram, flattened to 1-D
    features = []
    for (chan, color) in zip(chans, colors):
        hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
        features.extend(hist)
    features = np.asarray(features)
    features = features.flatten()
    fd_name = os.path.split(imagePath)[1].split(".")[0] + ".feat"
    fd_path = os.path.join(args["nf"], fd_name)
    joblib.dump(features, fd_path)
'''plt.plot(hist, color = color)
plt.xlim([0, 256])
plt.show()
print "flattened feature vector size: %d" % (np.array(features).flatten().shape)'''
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/stats/tests/test_math.py | 15 | 1927 | import nose
from datetime import datetime
from numpy.random import randn
import numpy as np
from pandas.core.api import Series, DataFrame, date_range
from pandas.util.testing import assert_almost_equal
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
import pandas.stats.math as pmath
import pandas.tests.test_series as ts
from pandas import ols
# shared fixture dimensions: N observations, K columns
N, K = 100, 10

# probe for statsmodels under either of its historical import paths
try:
    import statsmodels.api as sm
    _have_statsmodels = True
except ImportError:
    try:
        import scikits.statsmodels.api as sm
        _have_statsmodels = True
    except ImportError:
        _have_statsmodels = False
class TestMath(tm.TestCase):
    # rows of the shared test array that are set to NaN
    _nan_locs = np.arange(20, 40)
    _inf_locs = np.array([])

    def setUp(self):
        # random series/frame over a daily date index, with a NaN block
        arr = randn(N)
        arr[self._nan_locs] = np.NaN

        self.arr = arr
        self.rng = date_range(datetime(2009, 1, 1), periods=N)

        self.series = Series(arr.copy(), index=self.rng)

        self.frame = DataFrame(randn(N, K), index=self.rng,
                               columns=np.arange(K))

    def test_rank_1d(self):
        # a random vector has rank 1; a constant vector has rank 0
        self.assertEqual(1, pmath.rank(self.series))
        self.assertEqual(0, pmath.rank(Series(0, self.series.index)))

    def test_solve_rect(self):
        # solving the rectangular system should match the OLS betas
        if not _have_statsmodels:
            raise nose.SkipTest("no statsmodels")

        b = Series(np.random.randn(N), self.frame.index)
        result = pmath.solve(self.frame, b)
        expected = ols(y=b, x=self.frame, intercept=False).beta
        self.assertTrue(np.allclose(result, expected))

    def test_inv_illformed(self):
        # inverse of a singular matrix should still return a result
        singular = DataFrame(np.array([[1, 1], [2, 2]]))
        rs = pmath.inv(singular)
        expected = np.array([[0.1, 0.2], [0.1, 0.2]])
        self.assertTrue(np.allclose(rs, expected))
if __name__ == '__main__':
    import nose
    # verbose, stop on first failure, drop into pdb on error/failure
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
chrisdamba/mining | mining/controllers/data/__init__.py | 4 | 5907 | # -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import json
import gc
from bottle import Bottle, request, response
from bottle.ext.mongo import MongoPlugin
from pandas import DataFrame
from mining.settings import PROJECT_PATH
from mining.utils import conf, __from__
from mining.utils._pandas import df_generate, DataFrameSearchColumn
from mining.db import DataWarehouse
# Bottle sub-application serving element data; MongoDB access is injected
# into route handlers by the bottle-mongo plugin.
data_app = Bottle()

mongo = MongoPlugin(
    uri=conf("mongodb")["uri"],
    db=conf("mongodb")["db"],
    json_mongo=True)
data_app.install(mongo)
@data_app.route('/<slug>')
def data(mongodb, slug):
    """Stream the data of the element identified by ``slug``.

    Speaks either websocket or plain HTTP/JSON depending on how the
    client connected.  Pagination, filtering, grouping, ordering,
    aggregation and file download are all driven by query-string
    parameters.
    """
    # check protocol to work
    ws = request.environ.get('wsgi.websocket')
    protocol = "websocket"
    if not ws:
        response.content_type = 'application/json'
        protocol = "http"

    # load the DataManager implementation matching the protocol
    DataManager = __from__(
        "mining.controllers.data.{}.DataManager".format(protocol))

    # instantiates the chosen protocol
    DM = DataManager(ws)

    # instantiate data warehouse
    DW = DataWarehouse()

    element = mongodb['element'].find_one({'slug': slug})
    element['page_limit'] = 50
    # NOTE(review): query-string values are strings, so this identity
    # comparison with False looks like it can never be true -- confirm
    # the intended way to disable the page limit.
    if request.GET.get('limit', True) is False:
        element['page_limit'] = 9999999999

    # grid elements are paginated unless a download was requested
    if element['type'] == 'grid' and "download" not in request.GET.keys():
        page = int(request.GET.get('page', 1))
        page_start = 0
        page_end = element['page_limit']
        if page >= 2:
            page_end = element['page_limit'] * page
            page_start = page_end - element['page_limit']
    else:
        page = 1
        page_start = None
        page_end = None

    # query-string keys shaped like 'filter__<field>__<operator>'
    filters = [i[0] for i in request.GET.iteritems()
               if len(i[0].split('filter__')) > 1]

    if not DW.search:
        data = DW.get(element.get('cube'), page=page)
    else:
        data = DW.get(element.get('cube'), filters=filters, page=page)

    columns = data.get('columns') or []

    fields = columns
    if request.GET.get('fields', None):
        fields = request.GET.get('fields').split(',')

    cube_last_update = mongodb['cube'].find_one({'slug': element.get('cube')})
    DM.send(json.dumps({'type': 'last_update',
                        'data': str(cube_last_update.get('lastupdate', ''))}))

    DM.send(json.dumps({'type': 'columns', 'data': fields}))

    df = DataFrame(data.get('data') or {}, columns=fields)
    if len(filters) >= 1:
        for f in filters:
            s = f.split('__')
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == 'like':
                df = df[df[field].str.contains(value)]
            elif operator == 'regex':
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))

    groupby = []
    if request.GET.get('groupby', None):
        groupby = request.GET.get('groupby', "").split(',')
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())

    # ordering: 'orderby' from the query string wins over the element's
    # stored default, and must name a known field
    if request.GET.get('orderby',
                       element.get('orderby', None)) and request.GET.get(
            'orderby', element.get('orderby', None)) in fields:

        orderby = request.GET.get('orderby', element.get('orderby', ''))
        if type(orderby) == str:
            orderby = orderby.split(',')
        orderby__order = request.GET.get('orderby__order',
                                         element.get('orderby__order', ''))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(',')
        ind = 0
        # '0' means descending, anything else ascending
        for orde in orderby__order:
            if orde == '0':
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        df = df.sort(orderby, ascending=orderby__order)

    DM.send(json.dumps({'type': 'max_page',
                        'data': data.get('count', len(df))}))

    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()

    categories = []

    # TODO: loop in aggregate (apply mult aggregate)
    aggregate = [i[0] for i in request.GET.iteritems()
                 if len(i[0].split('aggregate__')) > 1]
    if len(aggregate) >= 1:
        agg = aggregate[0].split('__')
        _agg = getattr(df.groupby(agg[1]), request.GET.get(aggregate[0]))()
        DF_A = DataFrame(_agg[_agg.keys()[0]]).to_dict().get(_agg.keys()[0])
        DM.send(json.dumps({'type': 'aggregate', 'data': DF_A}))

    # stream the records one message at a time
    records = df.to_dict(orient='records')
    if not DW.search:
        records = records[page_start:page_end]
    for i in records:
        if element.get('categories', None):
            categories.append(i[element.get('categories')])
        DM.send(json.dumps({'type': 'data', 'data': i}))

    DM.send(json.dumps({'type': 'categories', 'data': categories}))
    DM.send(json.dumps({'type': 'close'}))

    # CLEAN MEMORY
    del categories
    gc.collect()

    if not ws:
        # HTTP clients may request a CSV/XLS download of the frame
        if "download" in request.GET.keys():
            ext = request.GET.get("download", "xls")
            if ext == '':
                ext = 'xls'

            file_name = '{}/frontend/assets/exports/openmining-{}.{}'.format(
                PROJECT_PATH, element.get('cube'), ext)
            if ext == 'csv':
                df.to_csv(file_name, sep=";")
                contenttype = 'text/csv'
            else:
                df.to_excel(file_name)
                contenttype = 'application/vnd.ms-excel'

            response.set_header('charset', 'utf-8')
            response.set_header('Content-disposition', 'attachment; '
                                'filename={}.{}'.format(
                                    element.get('cube'), ext))
            response.content_type = contenttype

            ifile = open(file_name, "r")
            o = ifile.read()
            ifile.close()

            return o
        return json.dumps(DM.data)
| mit |
fbagirov/scikit-learn | sklearn/utils/estimator_checks.py | 33 | 48331 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# cached (X, y) Boston subset, populated lazily by _boston_subset()
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
# NOTE(review): presumably the estimators that accept/require 2-d
# (multi-output) y during the checks -- confirm against
# multioutput_estimator_convert_y_2d.
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
                'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
                'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
                'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
                'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
                'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                'RANSACRegressor', 'RadiusNeighborsRegressor',
                'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
    # checks that apply to every estimator, regardless of mixin type
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self

    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages

    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency

    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf

    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params

    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients

    yield check_estimator_sparse_data

    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
            # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        yield check_supervised_y_2d

    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
    yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
    yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
    """Yield every applicable check: the generic ones first, then the
    checks for each sklearn mixin the estimator class inherits from."""
    for check in _yield_non_meta_checks(name, Estimator):
        yield check

    per_mixin = ((ClassifierMixin, _yield_classifier_checks),
                 (RegressorMixin, _yield_regressor_checks),
                 (TransformerMixin, _yield_transformer_checks),
                 (ClusterMixin, _yield_clustering_checks))
    for mixin, yield_checks in per_mixin:
        if issubclass(Estimator, mixin):
            for check in yield_checks(name, Estimator):
                yield check
def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    Parameters
    ----------
    Estimator : class
        Class to check.
    """
    # BUGFIX: ``Estimator`` is the class itself (see docstring), so
    # ``Estimator.__class__.__name__`` would name the *metaclass*
    # (e.g. 'type' or 'ABCMeta'), not the estimator.
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)
def _boston_subset(n_samples=200):
    # Load, shuffle, truncate and standardize the Boston dataset once,
    # caching the result in the module-level ``BOSTON`` global.
    # NOTE: because of the cache, ``n_samples`` only takes effect on the
    # very first call.
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def set_fast_parameters(estimator):
    """Shrink iteration/size hyper-parameters so the checks run fast."""
    # speed up some estimators
    params = estimator.get_params()
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        # NMF
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)

    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)

    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100

    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)

    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """An object that is not an ndarray but is convertible to one
    through NumPy's ``__array__`` protocol."""

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        # hand the wrapped data back as-is; NumPy does the conversion
        return self.data
def _is_32bit():
    """Detect if process is 32bit Python."""
    pointer_bits = 8 * struct.calcsize('P')
    return pointer_bits == 32
def check_estimator_sparse_data(name, Estimator):
    # Fit/predict on every scipy sparse format; estimators that cannot
    # handle sparse input must fail with an explicit TypeError whose
    # message mentions 'sparse'.
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
        X = X_csr.asformat(sparse_format)
        # catch deprecation warnings
        with warnings.catch_warnings():
            if name in ['Scaler', 'StandardScaler']:
                # centering sparse data would densify it
                estimator = Estimator(with_mean=False)
            else:
                estimator = Estimator()
        set_fast_parameters(estimator)
        # fit and predict
        try:
            estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                assert_equal(probs.shape, (X.shape[0], 4))
        except TypeError as e:
            if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def check_dtype_object(name, Estimator):
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
        set_fast_parameters(estimator)

    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)

    if hasattr(estimator, "transform"):
        estimator.transform(X)

    # object-dtype labels may only fail with an 'Unknown label type' error
    try:
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise

    # a truly non-numeric object must trigger a clear TypeError
    X[0, 0] = {'foo': 'bar'}
    msg = "argument must be a string or a number"
    assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
    # run the transformer contract checks on a small blob dataset, both
    # as ndarrays and as plain Python lists
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
    # transformers must accept inputs that only implement __array__
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
    # calling transform() before fit() must raise
    X, y = _boston_subset()

    with warnings.catch_warnings(record=True):
        transformer = Transformer()

    assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
    # Core transformer contract: transform and fit_transform agree,
    # consecutive fit_transform calls agree, output sample counts match
    # the input, and malformed (transposed) input raises.
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_fast_parameters(transformer)

    # fit

    if name in CROSS_DECOMPOSITION:
        # cross-decomposition needs a 2-column Y
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)

    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
        assert_equal(len(X_pred2), n_samples)
        assert_equal(len(X_pred3), n_samples)

        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    # make_pipeline(est) must score/transform identically to est alone
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)

    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)
    funcs = ["score", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
    # check that all estimators accept an optional y
    # in fit and score so they can be used in pipelines
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]

    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func(X, y)
            args = inspect.getargspec(func).args
            # the second positional argument after self/X must be y (or Y)
            assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
    # fit and the prediction methods must accept float32, float64,
    # int32 and int64 input
    rnd = np.random.RandomState(0)
    X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
    X_train_64 = X_train_32.astype(np.float64)
    X_train_int_64 = X_train_32.astype(np.int64)
    X_train_int_32 = X_train_32.astype(np.int32)
    y = X_train_int_64[:, 0]
    y = multioutput_estimator_convert_y_2d(name, y)
    for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator, 1)
        estimator.fit(X_train, y)

        for method in ["predict", "transform", "decision_function",
                       "predict_proba"]:
            if hasattr(estimator, method):
                getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
    # fitting on 0-sample or 0-feature data must raise informative errors
    e = Estimator()
    set_fast_parameters(e)
    set_random_state(e, 1)

    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    assert_raises(ValueError, e.fit, X_zero_samples, [])

    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
    msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    """Check that fit/predict/transform reject NaN and inf inputs with a
    ValueError whose message mentions 'NaN' or 'inf'."""
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, Estimator)
            # actually fit
            estimator.fit(X_train_finite, y)

            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise like the fit branch does; previously
                    # an unexpected exception type was silently swallowed
                    # and the check passed.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, Estimator)

            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise unexpected exception types (see above).
                    raise exc
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
    """Test that we can pickle all estimators"""
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]

    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)

    # some estimators can't do features less than 0
    X -= X.min()

    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(name, y)

    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()

    set_random_state(estimator)
    set_fast_parameters(estimator)
    estimator.fit(X, y)

    # record the outputs of every available prediction method
    result = dict()
    for method in check_methods:
        if hasattr(estimator, method):
            result[method] = getattr(estimator, method)(X)

    # pickle and unpickle!
    pickled_estimator = pickle.dumps(estimator)
    unpickled_estimator = pickle.loads(pickled_estimator)

    # predictions must survive the round-trip
    for method in result:
        unpickled_result = getattr(unpickled_estimator, method)(X)
        assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check if number of features changes between calls to partial_fit.
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if isinstance(alg, ClassifierMixin):
        # classifiers need the full class set on the first partial_fit
        classes = np.unique(y)
        alg.partial_fit(X, y, classes=classes)
    else:
        alg.partial_fit(X, y)

    # a second call with fewer features must raise
    assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Common clustering checks: labels_ has one entry per sample, the
    clustering agrees with the true blobs (ARI > 0.4), and fit_predict
    matches labels_ from a fresh fit."""
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if hasattr(alg, "n_clusters"):
        alg.set_params(n_clusters=3)
    set_random_state(alg)
    if name == 'AffinityPropagation':
        alg.set_params(preference=-100)
        alg.set_params(max_iter=100)

    # fit
    alg.fit(X)
    # with lists
    alg.fit(X.tolist())

    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # BUGFIX: was ``name is 'SpectralClustering'`` -- identity comparison
    # with a string literal only worked via CPython interning.
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()
    if hasattr(clusterer, "compute_labels"):
        # MiniBatchKMeans
        if hasattr(clusterer, "random_state"):
            # fix the seed so both fits see identical randomness
            clusterer.set_params(random_state=0)
        # predictions must match whether or not labels were computed at fit time
        X_pred1 = clusterer.fit(X).predict(X)
        clusterer.set_params(compute_labels=False)
        X_pred2 = clusterer.fit(X).predict(X)
        assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
    """Check behavior on a degenerate single-class target: either raise an
    informative ValueError at fit time, or fit and predict that one class."""
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)  # all samples share the same label
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # a ValueError is fine, but only if its message mentions the
            # class problem; anything else is a real failure
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            # if fit succeeded, all predictions must be the single class
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Check the core classifier contract on both a multi-class and a binary
    blob problem: input validation, classes_ attribute, prediction shape,
    training accuracy, and agreement of decision_function/predict_proba
    with predict."""
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # catch deprecation warnings
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name in ['BernoulliNB', 'MultinomialNB']:
            # these require non-negative input
            X -= X.min()
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # raises error on malformed input for fit
        assert_raises(ValueError, classifier.fit, X, y[:-1])
        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)
        # raises error on malformed input for predict
        assert_raises(ValueError, classifier.predict, X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                # BUG FIX: was ``n_classes is 2`` / ``n_classes is 3`` --
                # identity comparison with int literals is implementation
                # dependent; use equality.
                if n_classes == 2:
                    assert_equal(decision.shape, (n_samples,))
                    # np.int was removed from numpy; builtin int is equivalent
                    dec_pred = (decision.ravel() > 0).astype(int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # 1on1 of LibSVM works differently
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)
                # raises error on malformed input for decision_function
                # (duplicate identical assertion removed)
                assert_raises(ValueError,
                              classifier.decision_function, X.T)
            except NotImplementedError:
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_array_almost_equal(np.sum(y_prob, axis=1),
                                      np.ones(n_samples))
            # raises error on malformed input for predict_proba
            # (duplicate identical assertion removed)
            assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
    """Check that fit() returns the estimator itself (enables chaining)."""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    # shift features so estimators that require non-negative input work
    X -= X.min()

    est = Estimator()
    set_fast_parameters(est)
    set_random_state(est)

    assert_true(est.fit(X, y) is est)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
    """Check that predict raises an exception in an unfitted estimator.

    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors as well as Classifiers
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        est = Estimator()
    # the raised message must at least mention "fit"
    msg = "fit"
    if hasattr(est, 'predict'):
        assert_raise_message((AttributeError, ValueError), msg,
                             est.predict, X)
    if hasattr(est, 'decision_function'):
        assert_raise_message((AttributeError, ValueError), msg,
                             est.decision_function, X)
    if hasattr(est, 'predict_proba'):
        assert_raise_message((AttributeError, ValueError), msg,
                             est.predict_proba, X)
    if hasattr(est, 'predict_log_proba'):
        assert_raise_message((AttributeError, ValueError), msg,
                             est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
    """Check that fitting with a column-vector y warns (DataConversionWarning)
    for single-output estimators and yields the same predictions as 1-D y."""
    if "MultiTask" in name:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # fit with the reference 1-D target
    estimator.fit(X, y)
    y_pred = estimator.predict(X)
    # reset the seed so the 2-D fit is comparable to the 1-D one
    set_random_state(estimator)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        estimator.fit(X, y[:, np.newaxis])
    y_pred_2d = estimator.predict(X)
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    if name not in MULTI_OUTPUT:
        # check that we warned if we don't support multi-output
        assert_greater(len(w), 0, msg)
        assert_true("DataConversionWarning('A column-vector y"
                    " was passed when a 1d array was expected" in msg)
        assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
    """Check that classifiers handle string targets (both str and object
    dtype) and that classes_ / predictions cover the training labels."""
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    # run once with str dtype and once with object dtype labels
    for y_names in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = y_names
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name == 'BernoulliNB':
            # binarize around the feature mean so both classes appear
            classifier.set_params(binarize=X.mean())
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # fit
        classifier.fit(X, y_)
        y_pred = classifier.predict(X)
        # training set performance: every class should be predicted
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        # NOTE(review): this only prints instead of asserting on a wrong
        # classes_ attribute -- presumably intentional leniency; confirm.
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
    """Check that fitting on integer targets gives the same predictions as
    fitting on the float-cast of those targets."""
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        regressor_1 = Regressor()
        regressor_2 = Regressor()
    set_fast_parameters(regressor_1)
    set_fast_parameters(regressor_2)
    set_random_state(regressor_1)
    set_random_state(regressor_2)
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a 2-D multi-target y
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    # fit once on int targets, once on the float-cast targets
    regressor_1.fit(X, y_)
    pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(np.float))
    pred2 = regressor_2.predict(X)
    assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
    """Check the core regressor contract: input validation, list input,
    prediction shape, and a minimal training-set R^2 of 0.5."""
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
    set_fast_parameters(regressor)
    if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
        # linear regressors need to set alpha, but not generalized CV ones
        regressor.alpha = 0.01
    if name == 'PassiveAggressiveRegressor':
        regressor.C = 0.01
    # raises error on malformed input for fit
    assert_raises(ValueError, regressor.fit, X, y[:-1])
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a 2-D multi-target y
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    set_random_state(regressor)
    regressor.fit(X, y_)
    # must also accept plain Python lists
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert_equal(y_pred.shape, y_.shape)
    # TODO: find out why PLS and CCA fail. RANSAC is random
    # and furthermore assumes the presence of outliers, hence
    # skipped
    if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
        print(regressor)
        assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    """Check that classifier-only methods on regressors are deprecated."""
    # checks whether regressors have decision_function or predict_proba
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    y = multioutput_estimator_convert_y_2d(name, X[:, 0])
    regressor = Regressor()
    set_fast_parameters(regressor)
    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1
    regressor.fit(X, y)
    funcs = ["decision_function", "predict_proba", "predict_log_proba"]
    for func_name in funcs:
        func = getattr(regressor, func_name, None)
        if func is None:
            # doesn't have function
            continue
        # has function. Should raise deprecation warning
        msg = func_name
        assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
    """Check that an extreme class_weight in favor of class 0 makes the
    classifier predict (almost) only class 0 on noisy blobs."""
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        # NOTE(review): the loop variable n_centers is rebound here to the
        # number of classes actually present in the training split
        n_centers = len(np.unique(y_train))
        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)
        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # the heavily-weighted class 0 must dominate predictions
        assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
                                            X_test, y_test, weights):
    """Check that class_weight='balanced' improves weighted F1 over the
    unweighted fit on the provided (imbalanced) train/test split."""
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        classifier.set_params(n_iter=100)
    set_random_state(classifier)
    # baseline: no class weighting
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    # rebalanced fit must beat the baseline on weighted F1
    classifier.set_params(class_weight='balanced')
    classifier.fit(X_train, y_train)
    y_pred_balanced = classifier.predict(X_test)
    assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
                   f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels.

    class_weight='balanced' must give the same coefficients as manually
    computed inverse-frequency weights.
    """
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        # This is a very small dataset, default n_iter are likely to prevent
        # convergence
        classifier.set_params(n_iter=1000)
    set_random_state(classifier)
    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()
    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))
    # weight_c = n_samples / (count_c * n_classes), the 'balanced' formula
    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()
    assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    """Check that fit() does not change or mutate constructor parameters."""
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # Make a physical copy of the orginal estimator parameters before fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)
    # Fit the model
    estimator.fit(X, y)
    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]
        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        # NOTE(review): the comment above mentions joblib.hash, but the code
        # calls the builtin hash(), which is shallow -- confirm intent.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     " the parameter %s from %s to %s during fit."
                     % (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
    """Check that sparsify() produces a sparse coef_ with unchanged
    predictions, and that the sparsified estimator survives pickling."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = Estimator()
    est.fit(X, y)
    pred_orig = est.predict(X)
    # test sparsify with dense inputs
    est.sparsify()
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)
    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
    """Classifier variant of the NotAnArray input check: small fixed
    dataset with two integer classes."""
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    labels = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, labels)
def check_regressor_data_not_an_array(name, Estimator):
    """Regressor variant of the NotAnArray input check on a Boston subset."""
    data, target = _boston_subset(n_samples=50)
    target = multioutput_estimator_convert_y_2d(name, target)
    check_estimators_data_not_an_array(name, Estimator, data, target)
def check_estimators_data_not_an_array(name, Estimator, X, y):
    """Check that fitting on array-like (NotAnArray) wrappers gives the same
    predictions as fitting on the underlying ndarrays."""
    if name in CROSS_DECOMPOSITION:
        raise SkipTest
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        estimator_1 = Estimator()
        estimator_2 = Estimator()
    set_fast_parameters(estimator_1)
    set_fast_parameters(estimator_2)
    set_random_state(estimator_1)
    set_random_state(estimator_2)
    # wrappers expose __array__ but are not ndarray instances
    y_ = NotAnArray(np.asarray(y))
    X_ = NotAnArray(np.asarray(X))
    # fit
    estimator_1.fit(X_, y_)
    pred1 = estimator_1.predict(X_)
    estimator_2.fit(X, y)
    pred2 = estimator_2.predict(X)
    assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
    """Check that the estimator is default-constructible, cloneable, and that
    __init__ only stores its parameters (every arg has a simple default that
    get_params reports unchanged)."""
    classifier = LDA()  # sub-estimator handed to meta-estimators
    # test default-constructibility
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            # NOTE(review): inspect.getargspec is deprecated/removed in
            # modern Python -- would need getfullargspec there.
            args, varargs, kws, defaults = inspect.getargspec(init)
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they need a non-default argument
            args = args[2:]
        else:
            args = args[1:]
        if args:
            # non-empty list
            assert_equal(len(args), len(defaults))
        else:
            return
        for arg, default in zip(args, defaults):
            # defaults must be simple, immutable-ish values
            assert_in(type(default), [str, int, float, bool, tuple, type(None),
                                      np.float64, types.FunctionType, Memory])
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            if isinstance(params[arg], np.ndarray):
                assert_array_equal(params[arg], default)
            else:
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return *y* reshaped to a column vector for estimators that require
    2-D targets.

    The MultiTask* estimators raise ValueError on 1-D y; every other
    estimator gets y back unchanged.
    """
    multitask_names = ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                       'MultiTaskLasso', 'MultiTaskElasticNet')
    return y[:, np.newaxis] if name in multitask_names else y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    """Check that iterative solvers expose n_iter_ > 0 after fitting."""
    # Check if all iterative solvers run for more than one iteration
    iris = load_iris()
    X, y_ = iris.data, iris.target
    if multi_output:
        y_ = y_[:, np.newaxis]
    set_random_state(estimator, 0)
    if name == 'AffinityPropagation':
        # unsupervised: fit takes no target
        estimator.fit(X)
    else:
        estimator.fit(X, y_)
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    """Check that iterative transformers expose n_iter_ > 1 after fitting."""
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1  # keep features strictly positive (e.g. for NMF)
    set_random_state(estimator, 0)
    estimator.fit(X, y_)
    # These return a n_iter per component.
    if name in CROSS_DECOMPOSITION:
        for iter_ in estimator.n_iter_:
            assert_greater(iter_, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
    """Check that get_params(deep=False) is a subset of get_params(deep=True)."""

    class T(BaseEstimator):
        """Mock classifier
        """

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        # composite estimators need a list of (name, estimator) steps
        e = estimator([('clf', T())])
    # BUG FIX: the tuple below was missing a comma -- the two adjacent
    # string literals concatenated to 'GridSearchCVRandomizedSearchCV',
    # so neither search estimator ever matched and was skipped.
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        return
    else:
        e = estimator()

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/sas/sas_xport.py | 14 | 14805 | """
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from datetime import datetime
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas import compat
import struct
import numpy as np
from pandas.util._decorators import Appender
import warnings
# Fixed 80-byte header records that delimit the sections of an XPORT file
# (see the TS-140 spec linked in the module docstring).
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
                  "000000000000000000000000000000 ")
_correct_header1 = ("HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!"
                    "000000000000000001600000000")
_correct_header2 = ("HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
                    "000000000000000000000000000000 ")
_correct_obs_header = ("HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
                       "000000000000000000000000000000 ")
# Names for the members of the NAMESTR field-descriptor struct, in the order
# they are unpacked in XportReader._read_header ('_' is padding and dropped).
_fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label',
              'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform',
              'nifl', 'nifd', 'npos', '_']
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = """Read a SAS file into a DataFrame.
%(_base_params_doc)s
%(_format_params_doc)s
%(_params2_doc)s
%(_iterator_doc)s
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pandas.read_sas('filename.XPT')
Read a Xport file in 10,000 line chunks:
>>> itr = pandas.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
.. versionadded:: 0.17.0
""" % {"_base_params_doc": _base_params_doc,
"_format_params_doc": _format_params_doc,
"_params2_doc": _params2_doc,
"_iterator_doc": _iterator_doc}
_xport_reader_doc = """\
Class for reading SAS Xport files.
%(_base_params_doc)s
%(_params2_doc)s
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
""" % {"_base_params_doc": _base_params_doc,
"_params2_doc": _params2_doc}
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr):
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out
def _handle_truncated_float_vec(vec, nbytes):
    """Right-pad *nbytes*-wide truncated IBM floats with zero bytes so each
    element becomes a full 8-byte value parseable by _parse_float_vec."""
    # This feature is not well documented, but some SAS XPORT files
    # have 2-7 byte "truncated" floats. To read these truncated
    # floats, pad them with zeros on the right to make 8 byte floats.
    #
    # References:
    # https://github.com/jcushman/xport/pull/3
    # The R "foreign" library
    if nbytes != 8:
        vec1 = np.zeros(len(vec), np.dtype('S8'))
        dtype = np.dtype('S%d,S%d' % (nbytes, 8 - nbytes))
        vec2 = vec1.view(dtype=dtype)
        # assigning into the 'f0' field of the view leaves the trailing
        # (8 - nbytes) bytes zeroed, which is exactly the required padding
        vec2['f0'] = vec
        return vec2
    return vec
def _parse_float_vec(vec):
    """
    Parse a vector of float values representing IBM 8 byte floats into
    native 8 byte floats.

    *vec* is viewed as pairs of big-endian uint32 halves; the result is a
    float64 ndarray of the same length.
    """
    dtype = np.dtype('>u4,>u4')
    vec1 = vec.view(dtype=dtype)
    xport1 = vec1['f0']
    xport2 = vec1['f1']
    # Start by setting first half of ieee number to first half of IBM
    # number sans exponent
    ieee1 = xport1 & 0x00ffffff
    # Get the second half of the ibm number into the second half of
    # the ieee number
    ieee2 = xport2
    # The fraction bit to the left of the binary point in the ieee
    # format was set and the number was shifted 0, 1, 2, or 3
    # places. This will tell us how to adjust the ibm exponent to be a
    # power of 2 ieee exponent and how to shift the fraction bits to
    # restore the correct magnitude.
    shift = np.zeros(len(vec), dtype=np.uint8)
    shift[np.where(xport1 & 0x00200000)] = 1
    shift[np.where(xport1 & 0x00400000)] = 2
    shift[np.where(xport1 & 0x00800000)] = 3
    # shift the ieee number down the correct number of places then
    # set the second half of the ieee number to be the second half
    # of the ibm number shifted appropriately, ored with the bits
    # from the first half that would have been shifted in if we
    # could shift a double. All we are worried about are the low
    # order 3 bits of the first half since we're only shifting by
    # 1, 2, or 3.
    ieee1 >>= shift
    ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
    # clear the 1 bit to the left of the binary point
    ieee1 &= 0xffefffff
    # set the exponent of the ieee number to be the actual exponent
    # plus the shift count + 1023. Or this into the first half of the
    # ieee number. The ibm exponent is excess 64 but is adjusted by 65
    # since during conversion to ibm format the exponent is
    # incremented by 1 and the fraction bits left 4 positions to the
    # right of the radix point. (had to add >> 24 because C treats &
    # 0x7f as 0x7f000000 and Python doesn't)
    ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
               shift + 1023) << 20) | (xport1 & 0x80000000)
    # reassemble the two uint32 halves into big-endian float64 bit patterns
    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
    ieee['f0'] = ieee1
    ieee['f1'] = ieee2
    ieee = ieee.view(dtype='>f8')
    ieee = ieee.astype('f8')
    return ieee
class XportReader(BaseIterator):
    __doc__ = _xport_reader_doc

    def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
                 chunksize=None):
        self._encoding = encoding
        self._lines_read = 0  # number of observation rows consumed so far
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            filepath_or_buffer, encoding, compression = get_filepath_or_buffer(
                filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = filepath_or_buffer.read()
            try:
                contents = contents.encode(self._encoding)
            # NOTE(review): bare except silently ignores any encode failure
            # (e.g. when contents is already bytes) -- should be narrowed.
            except:
                pass
            self.filepath_or_buffer = compat.BytesIO(contents)

        self._read_header()

    def close(self):
        # Close the underlying (owned) binary handle.
        self.filepath_or_buffer.close()

    def _get_row(self):
        # XPORT files are organized as fixed 80-byte "card" records.
        return self.filepath_or_buffer.read(80).decode()

    def _read_header(self):
        """Parse the library, member and field headers, leaving the file
        position at the start of the observation records and populating
        file_info, member_info, fields, columns, record_length and nobs."""
        self.filepath_or_buffer.seek(0)
        # read file header
        line1 = self._get_row()
        if line1 != _correct_line1:
            self.close()
            raise ValueError("Header record is not an XPORT file.")
        line2 = self._get_row()
        fif = [['prefix', 24], ['version', 8], ['OS', 8],
               ['_', 24], ['created', 16]]
        file_info = _split_line(line2, fif)
        if file_info['prefix'] != "SAS SAS SASLIB":
            self.close()
            raise ValueError("Header record has invalid prefix.")
        file_info['created'] = _parse_date(file_info['created'])
        self.file_info = file_info
        line3 = self._get_row()
        file_info['modified'] = _parse_date(line3[:16])

        # read member header
        header1 = self._get_row()
        header2 = self._get_row()
        headflag1 = header1.startswith(_correct_header1)
        headflag2 = (header2 == _correct_header2)
        if not (headflag1 and headflag2):
            self.close()
            raise ValueError("Member header not found")
        # usually 140, could be 135
        fieldnamelength = int(header1[-5:-2])

        # member info
        mem = [['prefix', 8], ['set_name', 8], ['sasdata', 8],
               ['version', 8], ['OS', 8], ['_', 24], ['created', 16]]
        member_info = _split_line(self._get_row(), mem)
        mem = [['modified', 16], ['_', 16], ['label', 40], ['type', 8]]
        member_info.update(_split_line(self._get_row(), mem))
        member_info['modified'] = _parse_date(member_info['modified'])
        member_info['created'] = _parse_date(member_info['created'])
        self.member_info = member_info

        # read field names
        types = {1: 'numeric', 2: 'char'}
        fieldcount = int(self._get_row()[54:58])
        datalength = fieldnamelength * fieldcount
        # round up to nearest 80: field descriptors are padded to card size
        if datalength % 80:
            datalength += 80 - datalength % 80
        fielddata = self.filepath_or_buffer.read(datalength)
        fields = []
        obs_length = 0
        while len(fielddata) >= fieldnamelength:
            # pull data for one field
            field, fielddata = (fielddata[:fieldnamelength],
                                fielddata[fieldnamelength:])

            # rest at end gets ignored, so if field is short, pad out
            # to match struct pattern below
            field = field.ljust(140)

            fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
            field = dict(zip(_fieldkeys, fieldstruct))
            del field['_']
            field['ntype'] = types[field['ntype']]
            fl = field['field_length']
            # numeric fields must be (possibly truncated) IBM floats: 2-8 bytes
            if field['ntype'] == 'numeric' and ((fl < 2) or (fl > 8)):
                self.close()
                msg = "Floating field width {0} is not between 2 and 8."
                raise TypeError(msg.format(fl))

            # strip padding from the byte-string members; ints have no strip
            for k, v in field.items():
                try:
                    field[k] = v.strip()
                except AttributeError:
                    pass

            obs_length += field['field_length']
            fields += [field]

        header = self._get_row()
        if not header == _correct_obs_header:
            self.close()
            raise ValueError("Observation header not found.")

        self.fields = fields
        self.record_length = obs_length
        self.record_start = self.filepath_or_buffer.tell()

        self.nobs = self._record_count()
        self.columns = [x['name'].decode() for x in self.fields]

        # Setup the dtype: one fixed-width bytes field per column so a whole
        # record can be read with a single np.frombuffer call.
        dtypel = []
        for i, field in enumerate(self.fields):
            dtypel.append(('s' + str(i), "S" + str(field['field_length'])))
        dtype = np.dtype(dtypel)
        self._dtype = dtype

    def __next__(self):
        # Iterator protocol: yield chunksize rows per step (1 if unset).
        return self.read(nrows=self._chunksize or 1)

    def _record_count(self):
        """
        Get number of records in file.

        This is maybe suboptimal because we have to seek to the end of
        the file.

        Side effect: returns file position to record_start.
        """
        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)

        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")

        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length

        # short records: the final card may be space-padded, so count the
        # trailing blank 8-byte groups and exclude them
        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)

        # 8 byte blank (2314885530818453536 == 0x2020202020202020,
        # i.e. eight ASCII spaces)
        ix = np.flatnonzero(last_card == 2314885530818453536)

        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)

        self.filepath_or_buffer.seek(self.record_start)

        return (total_records_length - tail_pad) // self.record_length

    def get_chunk(self, size=None):
        """
        Reads lines from Xport file and returns as dataframe

        Parameters
        ----------
        size : int, defaults to None
            Number of lines to read. If None, reads whole file.

        Returns
        -------
        DataFrame
        """
        if size is None:
            size = self._chunksize
        return self.read(nrows=size)

    def _missing_double(self, vec):
        # SAS missing values: first byte is '.', '_' or 'A'-'Z' and the
        # remaining 7 bytes are zero.
        v = vec.view(dtype='u1,u1,u2,u4')
        miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0)
        miss1 = (((v['f0'] >= 0x41) & (v['f0'] <= 0x5a)) |
                 (v['f0'] == 0x5f) | (v['f0'] == 0x2e))
        miss &= miss1
        return miss

    @Appender(_read_method_doc)
    def read(self, nrows=None):
        if nrows is None:
            nrows = self.nobs

        # never read past the end of the observation section
        read_lines = min(nrows, self.nobs - self._lines_read)
        read_len = read_lines * self.record_length
        if read_len <= 0:
            self.close()
            raise StopIteration
        raw = self.filepath_or_buffer.read(read_len)
        data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)

        df = pd.DataFrame(index=range(read_lines))
        for j, x in enumerate(self.columns):
            vec = data['s%d' % j]
            ntype = self.fields[j]['ntype']
            if ntype == "numeric":
                # pad truncated floats, detect SAS missings, then decode IBM
                # floats and blank out the missings as NaN
                vec = _handle_truncated_float_vec(
                    vec, self.fields[j]['field_length'])
                miss = self._missing_double(vec)
                v = _parse_float_vec(vec)
                v[miss] = np.nan
            elif self.fields[j]['ntype'] == 'char':
                v = [y.rstrip() for y in vec]
                if compat.PY3:
                    if self._encoding is not None:
                        v = [y.decode(self._encoding) for y in v]
            df[x] = v

        if self._index is None:
            # continue the running row index across chunks
            df.index = range(self._lines_read, self._lines_read + read_lines)
        else:
            df = df.set_index(self._index)

        self._lines_read += read_lines

        return df
| agpl-3.0 |
mikekestemont/beckett | code/diachron.py | 1 | 7046 | import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sb
sb.set_style("dark")
import os
import string
import codecs
import glob
from operator import itemgetter
from collections import namedtuple
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, f_classif, chi2
from sklearn.decomposition import PCA
from HACluster import *
import PLM
from nltk.tokenize import wordpunct_tokenize
def identity(x):
    """Return the input unchanged; passed as ``analyzer`` to TfidfVectorizer
    so it accepts pre-tokenized documents."""
    return x
# A corpus triple: parallel sequences of dates, titles and tokenized texts.
Oeuvre = namedtuple('Oeuvre', ['dates', 'titles', 'texts'])
def load_data(genres=['prose'], data_dir="../data",
              min_nb_tokens=1000):
    """Load, lowercase and tokenize every *.txt file of the given genres.

    Files shorter than ``min_nb_tokens`` tokens are skipped.  Filenames are
    expected to follow ``<genre>_<date>_<title>.txt``.  Returns an
    ``Oeuvre`` namedtuple of (dates, titles, texts) sorted chronologically.
    (Python 2 file: ``print`` statements with trailing commas below.)
    NOTE(review): the mutable default ``genres=['prose']`` is never
    mutated here, so it is harmless.
    """
    items = []
    # iterate over relevant genres:
    for genre in genres:
        for filename in glob.glob(data_dir+"/"+genre+"/*.txt"):
            print "\t+ "+filename,
            with codecs.open(filename, 'r', 'utf-8') as F:
                words = wordpunct_tokenize(F.read().lower())
            if len(words) >= min_nb_tokens:
                print ">>> "+str(len(words))+" words loaded:",
                print (" ".join(words[:6])).strip()
                # NOTE: rebinds the loop variable `genre`; harmless since
                # it is reassigned on the next iteration anyway.
                genre, date, title = os.path.basename(filename).replace(".txt", "").split("_")
                date = int(date)
                items.append((date, title, words))
            else:
                print ">>> file too short"
    # sort texts chronologically:
    items.sort(key=itemgetter(0))
    return Oeuvre(*zip(*items))
def sample(oeuvre, sample_size=2500):
    """Slice every text of ``oeuvre`` into consecutive, non-overlapping
    samples of ``sample_size`` tokens.

    Texts of at most ``sample_size`` tokens are kept whole as a single
    sample.  Titles get a ``_<n>`` suffix numbering the samples; dates are
    carried over unchanged for every sample of a text.

    Parameters
    ----------
    oeuvre : Oeuvre
        (dates, titles, texts) triple as returned by ``load_data``.

    Returns
    -------
    Oeuvre
        New (dates, titles, samples) triple, one entry per sample.
    """
    dates, titles, samples = [], [], []
    for date, title, text in zip(*oeuvre):
        if len(text) > sample_size:  # more than one sample
            start_idx, end_idx, cnt = 0, sample_size, 0
            while end_idx <= len(text):
                dates.append(date)
                titles.append(str(title)+"_"+str(cnt+1))
                samples.append(text[start_idx:end_idx])
                cnt += 1
                start_idx += sample_size
                end_idx += sample_size
        else:
            # BUG FIX: the date previously got a "_1" suffix (making it a
            # string), inconsistent with the int dates appended above and
            # breaking numeric comparisons downstream (e.g. the
            # `date < break_date` test in plm()).
            dates.append(date)
            titles.append(str(title)+"_1")
            samples.append(text)
    return Oeuvre(dates, titles, samples)
def load_stopwords(filepath="../data/stopwords.txt"):
    """Load a whitespace-separated stopword list from ``filepath``.

    Returns
    -------
    set
        The lowercased stopword tokens.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original leaked the file object returned by codecs.open).
    with codecs.open(filepath, 'r', 'utf-8') as f:
        return set(f.read().lower().split())
# --- corpus / feature-extraction setup (runs at import time) ---
sample_size = 1000
genres = ['drama']
oeuvre = load_data(genres=genres, min_nb_tokens=sample_size)
oeuvre = sample(oeuvre=oeuvre, sample_size=sample_size)
stopwords = load_stopwords()
# Relative stopword frequencies per sample: the documents are already
# token lists (hence the identity "analyzer") and idf weighting is
# deliberately disabled so X holds plain tf profiles.
vectorizer = TfidfVectorizer(analyzer=identity,
                             vocabulary=stopwords,
                             #max_features=1000,
                             use_idf=False)
X = vectorizer.fit_transform(oeuvre.texts).toarray()
def vnc():
    """Variability-based Neighbour Clustering of the stopword profiles.

    Clusters the module-level feature matrix ``X`` with HACluster's
    chronologically-constrained clusterer and saves a dendrogram to disk.
    """
    # Half squared-Euclidean distance between sample profiles.
    dist_matrix = DistanceMatrix(X, lambda u,v: np.sum((u-v)**2)/2)
    # initialize a clusterer, with default linkage methode (Ward)
    clusterer = VNClusterer(dist_matrix)
    # start the clustering procedure
    clusterer.cluster(verbose=0)
    # plot the result as a dendrogram
    clusterer.dendrogram().draw(title="Becket's oeuvre - VNC analysis",#clusterer.linkage.__name__,
                                labels=oeuvre.titles,#oeuvre.dates,
                                show=False, save=True,
                                fontsize=3)
#vnc()
def plm(break_date=1955, nb=50):
    """Contrast texts written before vs. after ``break_date`` with a
    parsimonious language model and print the ``nb`` top-scoring words
    (with their exponentiated scores) per period.

    NOTE(review): iterating ``big_docs.values()`` and ``big_docs.keys()``
    separately relies on both yielding the same order -- true within one
    run, but the before/after ordering itself is arbitrary under
    Python 2 dicts; verify PLM does not depend on a fixed order.
    """
    big_docs = {"before":[], "after":[]}
    # Concatenate all sample tokens into one big document per period.
    for text, date in zip(oeuvre.texts, oeuvre.dates):
        if date < break_date:
            big_docs["before"].extend(text)
        else:
            big_docs["after"].extend(text)
    plm = PLM.ParsimoniousLM(big_docs.values(), 0.1)
    plm.fit(big_docs.values(), big_docs.keys())
    for category, lm in plm.fitted_:
        print category
        words = plm.vectorizer.get_feature_names()
        scores = []
        # lm holds log-probabilities; exponentiate for readability.
        for word, score in sorted(zip(words, lm), key=lambda i:i[1], reverse=True)[:nb]:
            scores.append((word, np.exp(score)))
        print scores
#plm()
def tau(nb=5):
    """Rank stopword features by Kendall's tau against chronological rank
    and plot the ``nb`` most falling and most rising features.

    Saves ``top_tau.pdf`` (strongest negative correlations) and
    ``bottom_tau.pdf`` (strongest positive correlations).

    Parameters
    ----------
    nb : int
        Number of features per plot.

    NOTE: the original signature accepted ``nb=10`` but immediately
    overwrote it with ``nb = 5``, so the parameter was dead.  The default
    is now 5 -- identical behaviour for the existing ``tau()`` call --
    and the parameter is actually honoured.
    """
    from scipy.stats import kendalltau
    df = pd.DataFrame(X)
    df.columns = vectorizer.get_feature_names()
    df.index = oeuvre.titles
    scores = []
    ranks = range(1, len(df.index) + 1)
    for feat in df.columns:
        # local renamed from `tau` to avoid shadowing this function's name
        t, p = kendalltau(ranks, df[feat].tolist())
        scores.append((feat, t))
    scores.sort(key=itemgetter(1))
    top, bottom = scores[:nb], scores[-nb:]

    def _plot(pairs, fname):
        # One line per feature: frequency against chronological rank.
        fig = sb.plt.figure()
        sb.set_style("darkgrid")
        for (feat, t), col in zip(pairs, sb.color_palette("Set1")[:nb]):
            sb.plt.plot(ranks, df[feat].tolist(), label=feat, c=col)
        sb.plt.legend(loc="best")
        sb.plt.xlabel('Diachrony', fontsize=10)
        sb.plt.ylabel('Frequency', fontsize=10)
        sb.plt.savefig(fname)

    _plot(top, "top_tau.pdf")
    _plot(bottom, "bottom_tau.pdf")
tau()
def ngram_viewer(items=[]):
    """Plot the diachronic frequency profile of the requested stopwords
    and save the figure to ``ngram_viewer.pdf``.

    Items not present in the vectorizer vocabulary are silently dropped.
    NOTE(review): converting ``items`` to a set makes the item/colour
    pairing order arbitrary; the mutable default is never mutated
    (``items`` is rebound), so it is harmless.
    """
    items = set(items)
    df = pd.DataFrame(X)
    df.columns = vectorizer.get_feature_names()
    df.index = oeuvre.titles
    ranks = range(1,len(df.index)+1)
    fig = sb.plt.figure()
    sb.set_style("darkgrid")
    # remove OOV items
    items = {item for item in items if item in df}
    for item, colour in zip(items, sb.color_palette("Set1")[:len(items)]):
        sb.plt.plot(ranks, df[item].tolist(), label=item, c=colour)
    sb.plt.legend(loc="best")
    sb.plt.xlabel('Diachrony', fontsize=10)
    sb.plt.ylabel('Frequency', fontsize=10)
    sb.plt.savefig("ngram_viewer.pdf")
#ngram_viewer(["no", "less", "neither"])
# un- als prefix?
# leestekens beter weglaten
def pca():
    """Biplot of the oeuvre: samples and stopword loadings on the first
    two principal components of the standardized frequency matrix ``X``.

    Saves the figure to ``pca.pdf``.
    """
    import pylab as Plot
    # scale X:
    from sklearn.preprocessing import StandardScaler
    Xs = StandardScaler().fit_transform(X)
    P = PCA(n_components=2)
    Xr = P.fit_transform(Xs)
    loadings = P.components_.transpose()
    sb.set_style("darkgrid")
    fig, ax1 = plt.subplots()
    #Plot.tick_params(axis='both',which='both',top='off', left='off', right="off", bottom="off", labelbottom='off', labelleft="off", labelright="off")
    # first samples: invisible markers, annotated with sample titles
    x1, x2 = Xr[:,0], Xr[:,1]
    ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none');
    for x,y,l in zip(x1, x2, oeuvre.titles):
        print(l)
        ax1.text(x, y, l ,ha='center', va="center", size=10, color="darkgrey")
    # now loadings: drawn on a twinned axis so they get their own scale
    sb.set_style("dark")
    ax2 = ax1.twinx().twiny()
    l1, l2 = loadings[:,0], loadings[:,1]
    ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none');
    for x,y,l in zip(l1, l2, vectorizer.get_feature_names()):
        # Python 2 era: encode feature names before printing/annotating.
        l = l.encode('utf8')
        print(l)
        ax2.text(x, y, l ,ha='center', va="center", size=10, color="black")
    plt.savefig("pca.pdf", bbox_inches=0)
#pca()
| mit |
rajat1994/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit ``estimator`` on ``data`` and print one aligned table row:
    name, fit time, inertia, then cluster-quality metrics against the
    module-level ground-truth ``labels``."""
    t0 = time()
    estimator.fit(data)
    print('% 9s   %.2fs    %i   %.3f   %.3f   %.3f   %.3f   %.3f    %.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels,  estimator.labels_),
             # silhouette on a subsample to keep the benchmark fast
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 40 | 16837 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
    # Test the `_samme_proba` helper function.

    # Define some example (bad) `predict_proba` output.
    probs = np.array([[1, 1e-6, 0],
                      [0.19, 0.6, 0.2],
                      [-999, 0.51, 0.5],
                      [1e-6, 1, 1e-9]])
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]

    # _samme_proba calls estimator.predict_proba.
    # Make a mock object so I can control what gets returned.
    class MockEstimator(object):
        def predict_proba(self, X):
            # the helper must hand the data through unchanged
            assert_array_equal(X.shape, probs.shape)
            return probs
    mock = MockEstimator()

    samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))

    # output is finite and keeps the input shape
    assert_array_equal(samme_proba.shape, probs.shape)
    assert_true(np.isfinite(samme_proba).all())

    # Make sure that the correct elements come out as smallest --
    # `_samme_proba` should preserve the ordering in each example.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
    # Check classification on a toy dataset with both boosting algorithms.
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, random_state=0)
        clf.fit(X, y_class)
        assert_array_equal(clf.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
        # binary problem: proba has two columns, decision_function is 1-D
        assert_equal(clf.predict_proba(T).shape, (len(T), 2))
        assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset (the original comment said
    # "classification" -- a copy-paste slip).
    clf = AdaBoostRegressor(random_state=0)
    clf.fit(X, y_regr)
    assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    # Check consistency on dataset boston house prices.
    clf = AdaBoostRegressor(random_state=0)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    # R^2 on the training data should be comfortably high
    assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
    # Test that it gives proper exception on deficient input.
    # negative learning rate is rejected
    assert_raises(ValueError,
                  AdaBoostClassifier(learning_rate=-1).fit,
                  X, y_class)

    # unknown boosting algorithm is rejected
    assert_raises(ValueError,
                  AdaBoostClassifier(algorithm="foo").fit,
                  X, y_class)

    # invalid sample_weight (wrong length / negative) is rejected
    assert_raises(ValueError,
                  AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
    # Base estimators whose ``fit`` lacks sample_weight support must be
    # rejected by both AdaBoost variants.
    from sklearn.linear_model import LinearRegression
    from sklearn.cluster import KMeans

    clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostRegressor(LinearRegression())
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostRegressor(KMeans())
    assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
    # Check regression with sparse input: fitting/predicting on any sparse
    # format must match the dense results, and the base estimators must be
    # trained on CSC/CSR data internally.

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit caries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        # FIX: the original chained ``dense_classifier = dense_results = ...``;
        # the extra binding was dead (``dense_results`` is reassigned before
        # its first use) and has been removed.
        dense_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_regressor.predict(X_test_sparse)
        dense_results = dense_regressor.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_predict
        sparse_results = sparse_regressor.staged_predict(X_test_sparse)
        dense_results = dense_regressor.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # every base estimator must have seen CSC or CSR data
        types = [i.data_type_ for i in sparse_regressor.estimators_]
        assert all([(t == csc_matrix or t == csr_matrix)
                    for t in types])
| bsd-3-clause |
strands-project/strands_qsr_lib | qsr_lib/dbg/dbg_cardinal_directions.py | 8 | 3697 | #!/usr/bin/python
import math
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class Dbg(object):
    """Debug helper: classify the cardinal-direction relation between two
    axis-aligned 2D bounding boxes given as ``[x1, y1, x2, y2]``."""

    def __init__(self):
        pass

    def return_bounding_box_2d(self, x, y, xsize, ysize):
        """Return the bounding box

        :param x: x center
        :param y: y center
        :param xsize: x size
        :param ysize: y size
        :return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the
            coordinates of the diagonal points of the bounding box depending
            on your coordinates frame; [] (plus an error message) when a
            size is not strictly positive
        """
        if xsize <= 0 or ysize <= 0:
            print("ERROR: can't compute bounding box, xsize or height has no positive value")
            return []
        # FIX: use a float half-size so integer centers/sizes do not
        # floor-divide under Python 2 (1/2 == 0 there).
        half_x, half_y = xsize / 2.0, ysize / 2.0
        return [x - half_x, y - half_y, x + half_x, y + half_y]

    def compute_qsr(self, bb1, bb2):
        """Wrapper for __compute_qsr

        :param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
        :return: a string containing a cardinal direction relation
            (the original docstring said "an RCC", which was wrong)
        """
        return self.__compute_qsr(bb1, bb2)

    def __compute_qsr(self, bb1, bb2):
        """Return the cardinal direction of bb2 relative to bb1.

        :param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
        :return: one of 'north', 'north-east', 'east', 'south-east',
            'south', 'south-west', 'west', 'north-west', 'same', 'unknown'
        """
        # Difference between the centres of the two boxes.
        dx = ((bb2[0] + bb2[2]) / 2.0) - ((bb1[0] + bb1[2]) / 2.0)
        dy = ((bb2[1] + bb2[3]) / 2.0) - ((bb1[1] + bb1[3]) / 2.0)
        if dx == 0 and dy == 0:
            return 'same'
        # Bearing of the connecting line in degrees -- note atan2(dx, dy),
        # i.e. measured from the +y axis -- shifted by half a 45-degree
        # sector so that flooring below buckets into the nearest direction.
        angle = (math.atan2(dx, dy) * (180 / math.pi)) + 22.5
        # Normalize negative bearings into [0, 360).
        if angle < 0.0:
            angle = (360.0 + angle)
        # Lookup labels and return answer (int() keeps the dict key exact).
        return self.directionSwitch(int(math.floor(angle / 45.0)))

    # Switch Statement convert number into region label
    def directionSwitch(self, x):
        return {
            0: 'north',
            1: 'north-east',
            2: 'east',
            3: 'south-east',
            4: 'south',
            5: 'south-west',
            6: 'west',
            7: 'north-west',
        }.get((x), 'unknown')
def plot_bbs(bb1, bb2):
    """Draw the two bounding boxes (o1 blue, o2 red) on a fixed 0-6
    canvas with an inverted y axis, and show the figure."""
    plt.figure()
    ax = plt.gca()
    # ax.invert_yaxis()
    ax.add_patch(Rectangle((bb1[0], bb1[1]), bb1[2]-bb1[0], bb1[3]-bb1[1], alpha=1, facecolor="blue"))
    ax.annotate("o1", (bb1[0], bb1[1]), color='black', weight='bold', fontsize=14)
    ax.add_patch(Rectangle((bb2[0], bb2[1]), bb2[2]-bb2[0], bb2[3]-bb2[1], alpha=1, facecolor="red"))
    ax.annotate("o2", (bb2[0], bb2[1]), color='black', weight='bold', fontsize=14)
    h = 6
    l = 0
    # ax.set_xlim(l, h)
    # ax.set_ylim(l, h)
    ax.set_xlim(l, h)
    # y limits deliberately reversed (h, l) to flip the axis
    ax.set_ylim(h, l)
    plt.show()
if __name__ == '__main__':
    dbg = Dbg()
    # Play with these to test (x_center, y_center, xsize(i.e. x-size), ysize(i.e. y-size))
    o1 = (2.0, 2.0, 1., 1.)
    o2 = (1., 3., 1., 1.)
    # Convert the (center, size) tuples into [x1, y1, x2, y2] boxes.
    o1 = dbg.return_bounding_box_2d(o1[0], o1[1], o1[2], o1[3])
    o2 = dbg.return_bounding_box_2d(o2[0], o2[1], o2[2], o2[3])
    # Bounding boxes
    # print("o1:", o1)
    # print("o2:", o2)
    # Relations (direction of the second argument relative to the first)
    print("o1o2:", dbg.compute_qsr(o1, o2))
    print("o2o1:", dbg.compute_qsr(o2, o1))
    # Plot the boxes
    plot_bbs(o1, o2)
| mit |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev_4.5.5.201603221110/pysrc/pydev_ipython/qt_for_kernel.py | 67 | 2337 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
    """Map matplotlib's ``backend.qt4`` rcParam onto a list of acceptable
    Qt API identifiers.

    Returns None when matplotlib is absent or gives no hint; raises
    ImportError for an unrecognized backend value.
    """
    if mpl is None:
        return None
    backend = mpl.rcParams.get('backend.qt4', None)
    if backend is None:
        return None
    choice = backend.lower()
    if choice == 'pyside':
        return [QT_API_PYSIDE]
    if choice == 'pyqt4':
        return [QT_API_PYQT_DEFAULT]
    raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
                      backend)
def get_options():
    """Return a list of acceptable QT APIs, in decreasing order of
    preference, or None to defer to the external ETS ``QT_API`` setting.
    """
    #already imported Qt somewhere. Use that
    loaded = loaded_api()
    if loaded is not None:
        return [loaded]

    mpl = sys.modules.get('matplotlib', None)

    if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
        #1.0.1 only supports PyQt4 v1
        return [QT_API_PYQT_DEFAULT]

    if os.environ.get('QT_API', None) is None:
        #no ETS variable. Ask mpl, then use either
        return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]

    #ETS variable present. Will fallback to external.qt
    return None
# Resolve the preferred Qt binding at import time and expose the bound
# modules (QtCore/QtGui/QtSvg) plus the chosen API name at module level.
api_opts = get_options()
if api_opts is not None:
    QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable: defer entirely to the external qt shim
    from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| mit |
louispotok/pandas | pandas/tests/frame/test_timezones.py | 7 | 5632 | # -*- coding: utf-8 -*-
"""
Tests for DataFrame timezone-related methods
"""
from datetime import datetime
import pytest
import pytz
import numpy as np
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.core.indexes.datetimes import date_range
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas import Series, DataFrame
class TestDataFrameTimezones(object):
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
assert result.index.tz.zone == 'UTC'
tm.assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
assert result.columns.tz.zone == 'UTC'
tm.assert_frame_equal(result, expected.T)
def test_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
assert result.index.tz.zone == 'Europe/Berlin'
tm.assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
assert result.columns.tz.zone == 'Europe/Berlin'
tm.assert_frame_equal(result, expected.T)
def test_frame_join_tzaware(self):
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=lrange(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
tm.assert_index_equal(result.index, ex_index)
assert result.index.tz.zone == 'US/Central'
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng)), index=rng, columns=['a'])
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_frame_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
# different timezones convert to UTC
# frame with frame
df1_central = df1.tz_convert('US/Central')
new1, new2 = df1.align(df1_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# frame with Series
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
df1[0].align(df1_central, axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern'])
def test_frame_no_datetime64_dtype(self, tz):
    # after GH#7822
    # these retain the timezones on dict construction
    dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
    dr_tz = dr.tz_localize(tz)
    # A tz-aware column keeps DatetimeTZDtype instead of being coerced
    # to naive datetime64[ns].
    df = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
    tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)
    assert df['B'].dtype == tz_expected

    # GH#2810 (with timezones)
    datetimes_naive = [ts.to_pydatetime() for ts in dr]
    datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
    df = DataFrame({'dr': dr,
                    'dr_tz': dr_tz,
                    'datetimes_naive': datetimes_naive,
                    'datetimes_with_tz': datetimes_with_tz})
    # expect two naive columns and two tz-aware columns
    result = df.get_dtype_counts().sort_index()
    expected = Series({'datetime64[ns]': 2,
                       str(tz_expected): 2}).sort_index()
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern'])
def test_frame_reset_index(self, tz):
    # A reset_index()/set_index() round trip must preserve the tz.
    dr = date_range('2012-06-02', periods=10, tz=tz)
    df = DataFrame(np.random.randn(len(dr)), dr)
    roundtripped = df.reset_index().set_index('index')

    xp = df.index.tz
    rs = roundtripped.index.tz
    assert xp == rs
@pytest.mark.parametrize('tz', [None, 'America/New_York'])
def test_boolean_compare_transpose_tzindex_with_dst(self, tz):
    # GH 19970
    # Comparing a transposed frame with itself must work across a DST
    # transition (the November 4H range spans the US DST change).
    idx = date_range('20161101', '20161130', freq='4H', tz=tz)
    df = DataFrame({'a': range(len(idx)), 'b': range(len(idx))},
                   index=idx)
    result = df.T == df.T
    expected = DataFrame(True, index=list('ab'), columns=idx)
    tm.assert_frame_equal(result, expected)
| bsd-3-clause |
viiru-/pytrainer | pytrainer/lib/graphdata.py | 2 | 4969 | # -*- coding: iso-8859-1 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import logging
import gtk
class GraphData:
    '''
    Holds one data series plus the formatting needed to graph it via
    matplotlib: title, axis labels, colors, axis ranges and graph type.
    '''

    def __init__(self, title=None, ylabel=None, xlabel=None):
        logging.debug('>>')
        self.title = title
        self.ylabel = ylabel
        self.xlabel = xlabel
        self.labels = []        # optional per-point labels
        self.colors = []        # optional per-point colors
        self.x_values = []
        self.bar_bottoms = []   # bar charts: y origin of each bar
        self.bar_widths = []    # bar charts: width of each bar
        self.y_values = []
        self.linewidth = 1
        self.linecolor = '#ff0000'
        self.y2linecolor = '#ff0000'
        # Running min/max, maintained by addPoints()
        self.max_x_value = None
        self.min_x_value = None
        self.max_y_value = None
        self.min_y_value = None
        self.graphType = "plot"
        self.show_on_y1 = False
        self.show_on_y2 = False
        logging.debug('<<')

    def addBars(self, x=None, y=None):
        '''Append a bar of width x and height y. Bars are laid out side
        by side: each bar starts where the previous one ended.'''
        if x is None or y is None:
            # Must supply both x and y data points
            return
        if len(self.x_values) == 0:
            # First bar, so start at 0
            self.x_values.append(0)
        else:
            # Second or subsequent bar: start at previous left + width
            items = len(self.x_values)
            last_left = self.x_values[items - 1]
            last_width = self.bar_widths[items - 1]
            new_left = last_left + last_width
            self.x_values.append(new_left)
        self.bar_widths.append(x)
        self.y_values.append(y)
        self.bar_bottoms.append(0)

    def addPoints(self, x=None, y=None, label=None, color=None):
        '''Append an (x, y) point, tracking min/max of both axes.

        NOTE(review): falsy values (0, "", None) for x or y are silently
        rejected -- kept as-is for backward compatibility, but this means
        a legitimate 0 value cannot be plotted.'''
        if not x or not y:
            return
        self.x_values.append(x)
        self.y_values.append(y)
        if label is not None:
            self.labels.append(label)
        if color is not None:
            self.colors.append(color)
        if self.max_x_value is None or x > self.max_x_value:
            self.max_x_value = x
        if self.min_x_value is None or x < self.min_x_value:
            self.min_x_value = x
        if self.max_y_value is None or y > self.max_y_value:
            self.max_y_value = y
        if self.min_y_value is None or y < self.min_y_value:
            self.min_y_value = y

    def get_color(self, color):
        '''Translate a gtk color spec into a matplotlib "#rrggbb" string,
        or None when the input is None or cannot be parsed.'''
        if color is None:
            return None
        try:
            # Generate 13 digit color string from supplied color
            # ("#rrrrggggbbbb": 16 bits per channel)
            col = gtk.gdk.color_parse(color).to_string()
        except ValueError:
            logging.debug("Unable to parse color from '%s'" % color)
            return None
        # Create matplotlib color string from the high byte of each channel
        _color = "#%s%s%s" % (col[1:3], col[5:7], col[9:11])
        return _color

    def set_color(self, y1color, y2color=None):
        '''
        Helper function to set the line color
        need as some gtk.gdk color can be invalid for matplotlib
        '''
        _color = self.get_color(y1color)
        _color2 = self.get_color(y2color)
        # Note: assigned even when parsing failed (None) -- unchanged
        # from the original behavior.
        self.linecolor = _color
        self.y2linecolor = _color2

    def __len__(self):
        # BUG FIX: __len__ must return a non-negative int; the original
        # returned None here, which made len(obj) raise TypeError.
        # An absent series is reported as empty.
        if self.x_values is None:
            return 0
        return len(self.x_values)

    def __str__(self):
        return '''
        Title: %s
        ylabel: %s
        xlabel: %s
        linewidth: %d
        linecolor: %s
        graphType: %s
        show on y1: %s
        show on y2: %s
        x min max: %s %s
        y min max: %s %s
        x values: %s
        y values: %s''' % (self.title,
                           self.ylabel,
                           self.xlabel,
                           self.linewidth,
                           self.linecolor,
                           self.graphType,
                           str(self.show_on_y1),
                           str(self.show_on_y2),
                           str(self.min_x_value), str(self.max_x_value),
                           str(self.min_y_value), str(self.max_y_value),
                           str(self.x_values),
                           str(self.y_values)
                           )
| gpl-2.0 |
prasetiyohadi/learn-computations | monte-carlo/hitnmiss3.py | 1 | 2969 | from collections import OrderedDict as od
from matplotlib import pyplot as plt
import csv
import operator
import random
import time
def myfunc(x, y, z):
    """Integrand: x*y^2*z + x^3*y*z^2 - x*y*z^3."""
    first = x * y ** 2 * z
    second = x ** 3 * y * z ** 2
    third = x * y * z ** 3
    return first + second - third
def myintfunc(xa, ya, za, xb, yb, zb):
    """Analytically integrated myfunc over the box
    [xa,xb] x [ya,yb] x [za,zb]."""
    dx2, dx4 = xb ** 2 - xa ** 2, xb ** 4 - xa ** 4
    dy2, dy3 = yb ** 2 - ya ** 2, yb ** 3 - ya ** 3
    dz2, dz3, dz4 = zb ** 2 - za ** 2, zb ** 3 - za ** 3, zb ** 4 - za ** 4
    return dx2 * dy3 * dz2 / 12 + dx4 * dy2 * dz3 / 24 - dx2 * dy2 * dz4 / 16
# initial number of points
nInit = 10
# power steps: sample counts grow as nInit * 10**0 .. 10**(p-1)
p = 6
# integration boundary
xa = 1
xb = 3
ya = 0
yb = 5
za = 2
zb = 4

# Find the maximum of myfunc over the integer lattice of the box; used as
# the ceiling c for hit-and-miss sampling.
# NOTE(review): only integer lattice points are sampled -- this assumes the
# maximum of myfunc over the box occurs at a lattice point; confirm before
# reusing with other integrands/bounds.
maxvals = {}
for i in range(xa, xb + 1):
    for j in range(ya, yb + 1):
        for k in range(za, zb + 1):
            maxvals["%s%s%s" % (i, j, k)] = [myfunc(i, j, k)]
# sort by value; the last entry is the maximum
maxval = sorted(maxvals.items(), key=operator.itemgetter(1))
print("Nilai maximum fungsi = %s dengn nilai xyz = %s" %
      (maxval[-1][1][0], maxval[-1][0]))
# function ceiling
c = maxval[-1][1][0]
# construct result dictionary
result = {}
# calculate analytical integration result
reint = myintfunc(xa, ya, za, xb, yb, zb)
# define monte-carlo version of myfunc
def mymcfunc(xr, yr, zr):
    # Map a unit-cube sample (xr, yr, zr) in [0,1]^3 onto the integration
    # box [xa,xb] x [ya,yb] x [za,zb] and evaluate myfunc there.
    return myfunc((xa + (xb - xa) * xr), (ya + (yb - ya) * yr), (za + (zb - za) * zr))
# calculate hit and miss monte-carlo integration result
for x in range(0, p):
    # N grows geometrically: 10, 100, ..., nInit * 10**(p-1)
    N = nInit * 10 ** x
    count = 0  # samples that fall under the integrand surface ("hits")
    init = time.perf_counter()
    # NOTE(review): these start as lists but are rebound to scalars inside
    # the loop below (append was probably intended). They are never read
    # afterwards, so the computed results are unaffected.
    xrh = []
    yrh = []
    zrh = []
    vrh = []
    for i in range(0, N):
        xr = random.uniform(0, 1)
        yr = random.uniform(0, 1)
        zr = random.uniform(0, 1)
        vr = random.uniform(0, 1)
        # hit: the vertical sample c*vr lies below the integrand value
        if (c * vr < mymcfunc(xr, yr, zr)):
            count = count + 1
            xrh = xr
            yrh = yr
            zrh = zr
            vrh = vr
    deltat = time.perf_counter() - init
    # estimate = hit fraction * ceiling * box volume
    mcint = count / N * c * (xb - xa) * (yb - ya) * (zb - za)
    # per-N record: [estimate, absolute error vs analytic, runtime]
    result[N] = [mcint, abs(reint - mcint), deltat]
# save the result to CSV file
# sort result dictionary by number of points
oresult = od(sorted(result.items()))
# write to CSV file (headers are in Indonesian, matching the plots below)
with open('result.csv', 'w') as resfile:
    csvfile = csv.writer(resfile)
    csvfile.writerow(["jumlah titik", "hasil integrasi monte-carlo",
                      "kesalahan hasil integrasi", "waktu eksekusi"])
    for key, value in oresult.items():
        csvfile.writerow([key] + value)

# plot integration error and execution time against sample count
x = [item for item in oresult.keys()]
y1 = [item[1] for item in oresult.values()]  # absolute errors
y2 = [item[2] for item in oresult.values()]  # runtimes
colors = list('kymcbgr')
plt.semilogx(x, y1, color=colors.pop())
plt.title("Grafik kesalahan hasil integrasi terhadap jumlah titik")
plt.xlabel("jumlah titik (N)")
plt.ylabel("kesalahan hasil integrasi")
plt.grid(True)
plt.savefig('errorvsn.png', bbox_inches='tight')
# plt.show()
plt.close()
plt.semilogx(x, y2, color=colors.pop())
plt.title("Grafik waktu eksekusi terhadap jumlah titik")
plt.xlabel("jumlah titik (N)")
plt.ylabel("waktu eksekusi (s)")
plt.grid(True)
plt.savefig('deltatvsn.png', bbox_inches='tight')
# plt.show()
plt.close()
| mit |
da1z/intellij-community | python/helpers/pydev/pydevd.py | 1 | 69071 | '''
Entry point module (keep at root):
This module starts the debugger.
'''
import sys
# Refuse to start on interpreters older than 2.6 (the debugger depends on
# 2.6+ features); fail fast with a clear message.
if sys.version_info[:2] < (2, 6):
    raise RuntimeError('The PyDev.Debugger requires Python 2.6 onwards to be run. If you need to use an older Python version, use an older version of the debugger.')
import atexit
import os
import traceback
from _pydevd_bundle.pydevd_constants import IS_JYTH_LESS25, IS_PY3K, IS_PY34_OR_GREATER, IS_PYCHARM, get_thread_id, \
dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, xrange, \
clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE
from _pydev_bundle import fix_getpass
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_io, pydevd_vm_type, pydevd_tracing
from _pydevd_bundle import pydevd_utils
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, update_exception_hook
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO, CMD_STEP_OVER, \
CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, CMD_THREAD_SUSPEND, CMD_RUN_TO_LINE, \
CMD_ADD_EXCEPTION_BREAK, CMD_SMART_STEP_INTO, InternalConsoleExec, NetCommandFactory, \
PyDBDaemonThread, _queue, ReaderThread, GetGlobalDebugger, get_global_debugger, \
set_global_debugger, WriterThread, pydevd_find_thread_by_id, pydevd_log, \
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace, \
InternalSendCurrExceptionTraceProceeded
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from _pydevd_bundle.pydevd_trace_dispatch import trace_dispatch as _trace_dispatch, global_cache_skips, global_cache_frame_skips, show_tracing_warning
from _pydevd_frame_eval.pydevd_frame_eval_main import frame_eval_func, stop_frame_eval, enable_cache_frames_without_breaks, \
dummy_trace_dispatch, show_frame_eval_warning
from _pydevd_bundle.pydevd_utils import save_main_module
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads
# Debugger version, exposed both as a tuple and a dotted string.
__version_info__ = (1, 1, 1)
__version_info_str__ = []
for v in __version_info__:
    __version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)

#IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe

# Plugin support is unavailable on Jython < 2.5.
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
    from _pydevd_bundle.pydevd_plugin_utils import PluginManager

# Cache the threading functions (the module may be monkey-patched later).
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread

try:
    'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
    pass

# Global debugger session state flags.
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
forked = False

file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
    """Daemon thread that periodically drains the debugger's internal
    command queues via py_db.process_internal_commands()."""

    def __init__(self, py_db):
        PyDBDaemonThread.__init__(self)
        self._py_db_command_thread_event = py_db._py_db_command_thread_event
        self.py_db = py_db
        self.setName('pydevd.CommandThread')

    def _on_run(self):
        # Delayed start (up to ~4.5s) so the program's non-daemon threads
        # have a chance to come up first.
        for i in xrange(1, 10):
            time.sleep(0.5) #this one will only start later on (because otherwise we may not have any non-daemon threads
            if self.killReceived:
                return

        if self.pydev_do_not_trace:
            self.py_db.SetTrace(None) # no debugging on this thread

        try:
            while not self.killReceived:
                try:
                    self.py_db.process_internal_commands()
                except:
                    pydevd_log(0, 'Finishing debug communication...(2)')
                # Wake early when someone sets the event; otherwise poll.
                self._py_db_command_thread_event.clear()
                self._py_db_command_thread_event.wait(0.5)
        except:
            pydev_log.debug(sys.exc_info()[0])
            #only got this error in interpreter shutdown
            #pydevd_log(0, 'Finishing debug communication...(3)')
#=======================================================================================================================
# CheckOutputThread
# Non-daemonic thread guaranties that all data is written even if program is finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
    """Non-daemonic thread: guarantees all output is flushed to the IDE even
    after the program finishes, and ends the debug session once no user
    threads remain alive."""

    def __init__(self, py_db):
        PyDBDaemonThread.__init__(self)
        self.py_db = py_db
        self.setName('pydevd.CheckAliveThread')
        self.daemon = False
        py_db.output_checker = self

    def _on_run(self):
        if self.pydev_do_not_trace:

            disable_tracing = True

            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False

            if disable_tracing:
                pydevd_tracing.SetTrace(None)  # no debugging on this thread

        # Poll: finish the session when no user thread is alive, the writer
        # queue is drained and there is no pending redirected output.
        while not self.killReceived:
            time.sleep(0.3)
            if not self.py_db.has_threads_alive() and self.py_db.writer.empty() \
                    and not has_data_to_redirect():
                try:
                    pydev_log.debug("No alive threads, finishing debug session")
                    self.py_db.finish_debugging_session()
                    kill_all_pydev_threads()
                except:
                    traceback.print_exc()

                self.killReceived = True

            self.py_db.check_output_redirect()

    def do_kill_pydev_thread(self):
        # Request loop termination; the thread exits on its next poll.
        self.killReceived = True
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def __init__(self):
    """Initialize all debugger state and register this instance as the
    process-wide global debugger."""
    set_global_debugger(self)
    pydevd_tracing.replace_sys_set_trace_func()
    self.reader = None           # ReaderThread: socket -> commands
    self.writer = None           # WriterThread: commands -> socket
    self.output_checker = None   # CheckOutputThread, set on start
    self.quitting = None
    self.cmd_factory = NetCommandFactory()
    self._cmd_queue = {}  # the hash of Queues. Key is thread id, value is thread

    self.breakpoints = {}

    self.file_to_id_to_line_breakpoint = {}
    self.file_to_id_to_plugin_breakpoint = {}

    # Note: breakpoints dict should not be mutated: a copy should be created
    # and later it should be assigned back (to prevent concurrency issues).
    self.break_on_uncaught_exceptions = {}
    self.break_on_caught_exceptions = {}

    self.ready_to_run = False
    self._main_lock = thread.allocate_lock()
    self._lock_running_thread_ids = thread.allocate_lock()
    self._py_db_command_thread_event = threading.Event()
    CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
    self._finish_debugging_session = False
    self._termination_event_set = False
    self.signature_factory = None
    self.SetTrace = pydevd_tracing.SetTrace
    self.break_on_exceptions_thrown_in_same_context = False
    self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True

    # Suspend debugger even if breakpoint condition raises an exception
    SUSPEND_ON_BREAKPOINT_EXCEPTION = True
    self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION

    # By default user can step into properties getter/setter/deleter methods
    self.disable_property_trace = False
    self.disable_property_getter_trace = False
    self.disable_property_setter_trace = False
    self.disable_property_deleter_trace = False

    #this is a dict of thread ids pointing to thread ids. Whenever a command is passed to the java end that
    #acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
    #find that thread alive anymore, we must remove it from this list and make the java side know that the thread
    #was killed.
    self._running_thread_ids = {}
    self._set_breakpoints_with_id = False

    # This attribute holds the file-> lines which have an @IgnoreException.
    self.filename_to_lines_where_exceptions_are_ignored = {}

    #working with plugins (lazily initialized)
    self.plugin = None
    self.has_plugin_line_breaks = False
    self.has_plugin_exception_breaks = False
    self.thread_analyser = None
    self.asyncio_analyser = None

    # matplotlib support in debugger and debug console
    self.mpl_in_use = False
    self.mpl_hooks_in_debug_console = False
    self.mpl_modules_for_patching = {}

    self._filename_to_not_in_scope = {}
    self.first_breakpoint_reached = False
    self.is_filter_enabled = pydevd_utils.is_filter_enabled()
    self.is_filter_libraries = pydevd_utils.is_filter_libraries()
    self.show_return_values = False
    self.remove_return_values_flag = False

    # this flag disables frame evaluation even if it's available
    self.do_not_use_frame_eval = False
def get_plugin_lazy_init(self):
    # Instantiate the plugin manager on first use (only when supported).
    if self.plugin is None and SUPPORT_PLUGINS:
        self.plugin = PluginManager(self)
    return self.plugin
def not_in_scope(self, filename):
    # True when *filename* is outside the configured project roots.
    return pydevd_utils.not_in_project_roots(filename)
def is_ignored_by_filters(self, filename):
    # True when *filename* matches the user-configured ignore filters.
    return pydevd_utils.is_ignored_by_filter(filename)
def first_appearance_in_scope(self, trace):
    # True when the first traceback frame is in project scope and no
    # deeper frame in the chain is (i.e. the exception just entered scope).
    if trace is None or self.not_in_scope(trace.tb_frame.f_code.co_filename):
        return False
    else:
        trace = trace.tb_next
        while trace is not None:
            frame = trace.tb_frame
            if not self.not_in_scope(frame.f_code.co_filename):
                return False
            trace = trace.tb_next
        return True
def has_threads_alive(self):
    # True while any non-pydevd, non-daemon user thread is still running
    # (the main thread counts even though it may be flagged daemon).
    for t in threadingEnumerate():
        if getattr(t, 'is_pydev_daemon_thread', False):
            #Important: Jython 2.5rc4 has a bug where a thread created with thread.start_new_thread won't be
            #set as a daemon thread, so, we also have to check for the 'is_pydev_daemon_thread' flag.
            #See: https://github.com/fabioz/PyDev.Debugger/issues/11
            continue

        if isinstance(t, PyDBDaemonThread):
            pydev_log.error_once(
                'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')

        if is_thread_alive(t):
            if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
                return True

    return False
def finish_debugging_session(self):
    # Flag checked by the worker threads to stop dispatching.
    self._finish_debugging_session = True
def initialize_network(self, sock):
    """Start the reader/writer threads over an already-connected socket."""
    try:
        sock.settimeout(None)  # infinite, no timeouts from now on - jython does not have it
    except:
        pass
    self.writer = WriterThread(sock)
    self.reader = ReaderThread(sock)
    self.writer.start()
    self.reader.start()

    time.sleep(0.1)  # give threads time to start
def connect(self, host, port):
    """Establish the IDE connection: as a client when *host* is given,
    otherwise listen as a server on *port*."""
    if host:
        s = start_client(host, port)
    else:
        s = start_server(port)

    self.initialize_network(s)
def get_internal_queue(self, thread_id):
    """ returns internal command queue for a given thread.
    if new queue is created, notify the RDB about it """
    # Custom-frame ids look like '__frame__|<real_thread_id>'.
    if thread_id.startswith('__frame__'):
        thread_id = thread_id[thread_id.rfind('|') + 1:]

    try:
        return self._cmd_queue[thread_id]
    except KeyError:
        return self._cmd_queue.setdefault(thread_id, _queue.Queue()) #@UndefinedVariable
def post_internal_command(self, int_cmd, thread_id):
    """ if thread_id is *, post to all """
    if thread_id == "*":
        threads = threadingEnumerate()
        for t in threads:
            thread_id = get_thread_id(t)
            queue = self.get_internal_queue(thread_id)
            queue.put(int_cmd)
    else:
        queue = self.get_internal_queue(thread_id)
        queue.put(int_cmd)
def check_output_redirect(self):
    # Flush any buffered stdout/stderr to the IDE when redirection is on.
    global bufferStdOutToServer
    global bufferStdErrToServer

    if bufferStdOutToServer:
        init_stdout_redirect()
        self.check_output(sys.stdoutBuf, 1)  #@UndefinedVariable

    if bufferStdErrToServer:
        init_stderr_redirect()
        self.check_output(sys.stderrBuf, 2)  #@UndefinedVariable
def check_output(self, out, outCtx):
    '''Checks the output to see if we have to send some buffered output to the debug server

    @param out: sys.stdout or sys.stderr
    @param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it)
    '''
    try:
        v = out.getvalue()

        if v:
            # NOTE(review): the factory appears to enqueue the IO message
            # itself when given the debugger instance -- confirm in
            # NetCommandFactory.make_io_message.
            self.cmd_factory.make_io_message(v, outCtx, self)
    except:
        traceback.print_exc()
def init_matplotlib_in_debug_console(self):
    # import hook and patches for matplotlib support in debug console
    from _pydev_bundle.pydev_import_hook import import_hook_manager
    for module in dict_keys(self.mpl_modules_for_patching):
        # pop so each patch is registered (and applied) at most once
        import_hook_manager.add_module_name(module, self.mpl_modules_for_patching.pop(module))
def init_matplotlib_support(self):
    # prepare debugger for integration with matplotlib GUI event loop
    from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui

    # enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
    # in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
    # thread and can call it directly.
    class _MatplotlibHelper:
        # Holder for the oscillating return-control flag below.
        _return_control_osc = False

    def return_control():
        # Some of the input hooks (e.g. Qt4Agg) check return control without doing
        # a single operation, so we don't return True on every
        # call when the debug hook is in place to allow the GUI to run
        _MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
        return _MatplotlibHelper._return_control_osc

    from pydev_ipython.inputhook import set_return_control_callback
    set_return_control_callback(return_control)

    # Lazily-applied patches, consumed by _activate_mpl_if_needed().
    self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
                                     "matplotlib.pyplot": activate_pyplot,
                                     "pylab": activate_pylab}
def _activate_mpl_if_needed(self):
    # Apply pending matplotlib patches for any module already imported.
    if len(self.mpl_modules_for_patching) > 0:
        for module in dict_keys(self.mpl_modules_for_patching):
            if module in sys.modules:
                activate_function = self.mpl_modules_for_patching.pop(module)
                activate_function()
                self.mpl_in_use = True
def _call_mpl_hook(self):
    # Run the registered GUI input hook once (best effort, errors ignored)
    # so the matplotlib event loop stays responsive while suspended.
    try:
        from pydev_ipython.inputhook import get_inputhook
        inputhook = get_inputhook()
        if inputhook:
            inputhook()
    except:
        pass
def suspend_all_other_threads(self, thread_suspended_at_bp):
    """Suspend every user thread except the one that hit the breakpoint."""
    all_threads = threadingEnumerate()
    for t in all_threads:
        if getattr(t, 'is_pydev_daemon_thread', False):
            pass  # I.e.: skip the DummyThreads created from pydev daemon threads
        elif hasattr(t, 'pydev_do_not_trace'):
            pass  # skip some other threads, i.e. ipython history saving thread from debug console
        else:
            if t is thread_suspended_at_bp:
                continue
            additional_info = None
            try:
                additional_info = t.additional_info
            except AttributeError:
                pass  # that's ok, no info currently set

            if additional_info is not None:
                # Re-arm tracing on every live frame before suspending so
                # the suspend takes effect in already-running frames.
                for frame in additional_info.iter_frames(t):
                    self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True)
                    del frame

                self.set_suspend(t, CMD_THREAD_SUSPEND)
            else:
                sys.stderr.write("Can't suspend thread: %s\n" % (t,))
def process_internal_commands(self):
    '''This function processes internal commands
    '''
    self._main_lock.acquire()
    try:

        self.check_output_redirect()

        curr_thread_id = get_thread_id(threadingCurrentThread())
        program_threads_alive = {}
        all_threads = threadingEnumerate()
        program_threads_dead = []
        self._lock_running_thread_ids.acquire()
        try:
            for t in all_threads:
                if getattr(t, 'is_pydev_daemon_thread', False):
                    pass # I.e.: skip the DummyThreads created from pydev daemon threads
                elif isinstance(t, PyDBDaemonThread):
                    pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')

                elif is_thread_alive(t):
                    if not self._running_thread_ids:
                        # Fix multiprocessing debug with breakpoints in both main and child processes
                        # (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
                        # thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
                        # get new id with its process number and the debugger loses access to both threads.
                        # Therefore we should update thread_id for every main thread in the new process.

                        # TODO: Investigate: should we do this for all threads in threading.enumerate()?
                        # (i.e.: if a fork happens on Linux, this seems likely).
                        old_thread_id = get_thread_id(t)
                        if old_thread_id != 'console_main':
                            # The console_main is a special thread id used in the console and its id should never be reset
                            # (otherwise we may no longer be able to get its variables -- see: https://www.brainwy.com/tracker/PyDev/776).
                            clear_cached_thread_id(t)
                            clear_cached_thread_id(threadingCurrentThread())

                            thread_id = get_thread_id(t)
                            curr_thread_id = get_thread_id(threadingCurrentThread())
                            if pydevd_vars.has_additional_frames_by_id(old_thread_id):
                                frames_by_id = pydevd_vars.get_additional_frames_by_id(old_thread_id)
                                pydevd_vars.add_additional_frame_by_id(thread_id, frames_by_id)
                    else:
                        thread_id = get_thread_id(t)
                    # NOTE(review): when _running_thread_ids is empty and the
                    # thread id is 'console_main', thread_id may be carried
                    # over from the previous loop iteration (or unbound on the
                    # first one) -- confirm intended.
                    program_threads_alive[thread_id] = t

                    if thread_id not in self._running_thread_ids:
                        if not hasattr(t, 'additional_info'):
                            # see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
                            # Let's create the additional info right away!
                            t.additional_info = PyDBAdditionalThreadInfo()
                        self._running_thread_ids[thread_id] = t
                        self.writer.add_command(self.cmd_factory.make_thread_created_message(t))

                    queue = self.get_internal_queue(thread_id)
                    cmdsToReadd = [] # some commands must be processed by the thread itself... if that's the case,
                    # we will re-add the commands to the queue after executing.
                    try:
                        while True:
                            int_cmd = queue.get(False)

                            if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
                                # add import hooks for matplotlib patches if only debug console was started
                                try:
                                    self.init_matplotlib_in_debug_console()
                                    self.mpl_in_use = True
                                except:
                                    pydevd_log(2, "Matplotlib support in debug console failed", traceback.format_exc())
                                self.mpl_hooks_in_debug_console = True

                            if int_cmd.can_be_executed_by(curr_thread_id):
                                pydevd_log(2, "processing internal command ", str(int_cmd))
                                int_cmd.do_it(self)
                            else:
                                pydevd_log(2, "NOT processing internal command ", str(int_cmd))
                                cmdsToReadd.append(int_cmd)

                    except _queue.Empty: #@UndefinedVariable
                        # Queue drained; put back commands owned by other threads.
                        for int_cmd in cmdsToReadd:
                            queue.put(int_cmd)
                        # this is how we exit

            thread_ids = list(self._running_thread_ids.keys())
            for tId in thread_ids:
                if tId not in program_threads_alive:
                    program_threads_dead.append(tId)
        finally:
            self._lock_running_thread_ids.release()

        for tId in program_threads_dead:
            try:
                self._process_thread_not_alive(tId)
            except:
                sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
                    program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
                raise

        if len(program_threads_alive) == 0:
            # No user threads left: end the session and stop our own threads.
            self.finish_debugging_session()
            for t in all_threads:
                if hasattr(t, 'do_kill_pydev_thread'):
                    t.do_kill_pydev_thread()
    finally:
        self._main_lock.release()
def disable_tracing_while_running_if_frame_eval(self):
    # Swap in the no-op dispatcher while frame evaluation is active.
    pydevd_tracing.settrace_while_running_if_frame_eval(self, self.dummy_trace_dispatch)
def enable_tracing_in_frames_while_running_if_frame_eval(self):
    # Restore the real dispatcher in running frames.
    pydevd_tracing.settrace_while_running_if_frame_eval(self, self.trace_dispatch)
def set_tracing_for_untraced_contexts_if_not_frame_eval(self, ignore_frame=None, overwrite_prev_trace=False):
    # No-op when frame evaluation handles breakpoints.
    if self.frame_eval_func is not None:
        return
    self.set_tracing_for_untraced_contexts(ignore_frame, overwrite_prev_trace)
def set_tracing_for_untraced_contexts(self, ignore_frame=None, overwrite_prev_trace=False):
    # Enable the tracing for existing threads (because there may be frames being executed that
    # are currently untraced).
    # NOTE(review): this frame_eval_func guard duplicates the one in the
    # *_if_not_frame_eval wrapper above -- confirm both are intended.
    if self.frame_eval_func is not None:
        return
    threads = threadingEnumerate()
    try:
        for t in threads:
            if getattr(t, 'is_pydev_daemon_thread', False):
                continue

            # TODO: optimize so that we only actually add that tracing if it's in
            # the new breakpoint context.
            additional_info = None
            try:
                additional_info = t.additional_info
            except AttributeError:
                pass  # that's ok, no info currently set

            if additional_info is not None:
                for frame in additional_info.iter_frames(t):
                    if frame is not ignore_frame:
                        self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=overwrite_prev_trace)
    finally:
        # Drop references so frames can be collected promptly.
        frame = None
        t = None
        threads = None
        additional_info = None
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
    """Rebuild the per-file line -> breakpoint mapping for *file* from
    id_to_breakpoint and invalidate the tracing skip caches."""
    by_line = {}
    for _breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint):
        by_line[pybreakpoint.line] = pybreakpoint
    breakpoints[file] = by_line

    # Cached skip decisions may be stale after a breakpoint change.
    global_cache_skips.clear()
    global_cache_frame_skips.clear()
def add_break_on_exception(
    self,
    exception,
    condition,
    expression,
    notify_always,
    notify_on_terminate,
    notify_on_first_raise_only,
    ignore_libraries=False
    ):
    """Register an exception breakpoint; returns the ExceptionBreakpoint,
    or None when the exception class could not be imported."""
    try:
        eb = ExceptionBreakpoint(
            exception,
            condition,
            expression,
            notify_always,
            notify_on_terminate,
            notify_on_first_raise_only,
            ignore_libraries
        )
    except ImportError:
        pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
        return None

    # Copy-then-assign: the dicts must not be mutated in place (see __init__).
    if eb.notify_on_terminate:
        cp = self.break_on_uncaught_exceptions.copy()
        cp[exception] = eb
        if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
            pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
        self.break_on_uncaught_exceptions = cp

    if eb.notify_always:
        cp = self.break_on_caught_exceptions.copy()
        cp[exception] = eb
        if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
            pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
        self.break_on_caught_exceptions = cp

    return eb
def update_after_exceptions_added(self, added):
    # Refresh the hooks at most once per category (uncaught/caught),
    # no matter how many breakpoints were added.
    updated_on_caught = False
    updated_on_uncaught = False

    for eb in added:
        if not updated_on_uncaught and eb.notify_on_terminate:
            updated_on_uncaught = True
            update_exception_hook(self)

        if not updated_on_caught and eb.notify_always:
            updated_on_caught = True
            self.set_tracing_for_untraced_contexts_if_not_frame_eval()
def _process_thread_not_alive(self, threadId):
    """ if thread is not alive, cancel trace_dispatch processing """
    self._lock_running_thread_ids.acquire()
    try:
        thread = self._running_thread_ids.pop(threadId, None)
        if thread is None:
            return

        # Notify the IDE only once per thread.
        wasNotified = thread.additional_info.pydev_notify_kill
        if not wasNotified:
            thread.additional_info.pydev_notify_kill = True
    finally:
        self._lock_running_thread_ids.release()

    cmd = self.cmd_factory.make_thread_killed_message(threadId)
    self.writer.add_command(cmd)
def set_suspend(self, thread, stop_reason):
    # Mark the thread suspended; the trace dispatch honors these flags.
    thread.additional_info.suspend_type = PYTHON_SUSPEND
    thread.additional_info.pydev_state = STATE_SUSPEND
    thread.stop_reason = stop_reason

    # If conditional breakpoint raises any exception during evaluation send details to Java
    if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception:
        self._send_breakpoint_condition_exception(thread)
def _send_breakpoint_condition_exception(self, thread):
    """Forward details of an exception raised while evaluating a conditional
    breakpoint to the client (java side)."""
    thread_id = get_thread_id(thread)
    exc_info = thread.additional_info.conditional_breakpoint_exception
    # Expected layout: (exception_type, stacktrace).
    if exc_info and len(exc_info) == 2:
        exc_type, stacktrace = exc_info
        int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
        # Consume the stored details so they are reported only once.
        thread.additional_info.conditional_breakpoint_exception = None
        self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
    """Report a caught exception (where we stopped) to the client.

    *arg* is the (exception type, description, traceback object) triple.
    """
    thread_id = get_thread_id(thread)
    self.post_internal_command(
        InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id), thread_id)
def send_caught_exception_stack_proceeded(self, thread):
    """Tell the client that *thread* resumed and no longer shows an
    exception trace, then flush pending internal commands."""
    thread_id = get_thread_id(thread)
    self.post_internal_command(
        InternalSendCurrExceptionTraceProceeded(thread_id), thread_id)
    self.process_internal_commands()
def send_process_created_message(self):
    """Notify the client that a new process has been created."""
    self.writer.add_command(self.cmd_factory.make_process_created_message())
def set_next_statement(self, frame, event, func_name, next_line):
    """Try to move execution of *frame* to *next_line* (run-to-line /
    set-next-statement).

    Returns ``(stop, old_line, response_msg)``: whether the jump was made,
    the line the frame was on before, and an error message when it failed.
    """
    stop = False
    response_msg = ""
    old_line = frame.f_lineno
    # We can only act on 'line'/'exception' events -- if a return were the
    # next statement it wouldn't work (this logic is therefore repeated at
    # pydevd_frame).
    if event in ('line', 'exception'):
        curr_func_name = frame.f_code.co_name
        # The global context is reported with an empty name.
        if curr_func_name in ('?', '<module>'):
            curr_func_name = ''
        if curr_func_name == func_name:
            if frame.f_lineno == next_line:
                stop = True
            else:
                # f_lineno may only be assigned while a trace function is
                # active on the frame; temporarily install one if needed.
                if frame.f_trace is None:
                    frame.f_trace = self.trace_dispatch
                frame.f_lineno = next_line
                frame.f_trace = None
                stop = True
        else:
            response_msg = "jump is available only within the bottom frame"
    return stop, old_line, response_msg
def do_wait_suspend(self, thread, frame, event, arg, suspend_type="trace", send_suspend_message=True): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
"""
self.process_internal_commands()
# Announce the suspension (with the stack) unless the caller already did,
# e.g. when re-entering after a set-next-statement round trip.
if send_suspend_message:
message = thread.additional_info.pydev_message
cmd = self.cmd_factory.make_thread_suspend_message(get_thread_id(thread), frame, thread.stop_reason, message, suspend_type)
self.writer.add_command(cmd)
# Also report any custom frames (e.g. tasklets) owned by this thread; they
# are remembered in from_this_thread so they can be removed on resume.
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
from_this_thread = []
for frame_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
# print >> sys.stderr, 'Frame created: ', frame_id
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(frame_id, custom_frame.name))
self.writer.add_command(self.cmd_factory.make_thread_suspend_message(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, "", suspend_type))
from_this_thread.append(frame_id)
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
info = thread.additional_info
if info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
# before every stop check if matplotlib modules were imported inside script code
self._activate_mpl_if_needed()
# Busy-wait here (still serving internal commands) until the client resumes us.
while info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
# process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
info.pydev_step_stop = None
info.pydev_smart_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
self.set_trace_for_frame_and_parents(frame)
info.pydev_step_stop = None
info.pydev_smart_step_stop = frame
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT:
self.set_trace_for_frame_and_parents(frame)
stop = False
response_msg = ""
old_line = frame.f_lineno
# PyDev and PyCharm drive set-next-statement differently: PyCharm expects
# an explicit status message back (and a revert of f_lineno on success).
if not IS_PYCHARM:
stop, _, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
if stop:
info.pydev_state = STATE_SUSPEND
self.do_wait_suspend(thread, frame, event, arg, "trace")
return
else:
try:
stop, old_line, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
except ValueError as e:
response_msg = "%s" % e
finally:
seq = info.pydev_message
cmd = self.cmd_factory.make_set_next_stmnt_status_message(seq, stop, response_msg)
self.writer.add_command(cmd)
info.pydev_message = ''
if stop:
info.pydev_state = STATE_RUN
# `f_line` should be assigned within a tracing function, so, we can't assign it here
# for the frame evaluation debugger. For tracing debugger it will be assigned, but we should
# revert the previous value, because both debuggers should behave the same way
try:
self.set_next_statement(frame, event, info.pydev_func_name, old_line)
except:
pass
else:
info.pydev_step_cmd = -1
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_THREAD_SUSPEND
# return to the suspend state and wait for other command
self.do_wait_suspend(thread, frame, event, arg, "trace", send_suspend_message=False)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
# (the previous frame would be the awt event, but this doesn't make part of 'jython', only 'java')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
# Re-enable frame evaluation when simply resuming (-1 == no step pending);
# fall back to the plain tracing dispatch while a step is in progress.
if self.frame_eval_func is not None and info.pydev_state == STATE_RUN:
if info.pydev_step_cmd == -1:
if not self.do_not_use_frame_eval:
self.SetTrace(self.dummy_trace_dispatch)
self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True, dispatch_func=dummy_trace_dispatch)
else:
self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True)
# enable old tracing function for stepping
self.SetTrace(self.trace_dispatch)
del frame
cmd = self.cmd_factory.make_thread_run_message(get_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print >> sys.stderr, 'Removing created frame: ', frame_id
self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
def handle_post_mortem_stop(self, thread, frame, frames_byid, exception):
    """Suspend *thread* on *frame* so the post-mortem exception can be
    inspected; always unregisters the extra frames afterwards."""
    pydev_log.debug("We are stopping in post-mortem\n")
    thread_id = get_thread_id(thread)
    # Register the captured frames so variable resolution works while stopped.
    pydevd_vars.add_additional_frame_by_id(thread_id, frames_byid)
    try:
        add_exception_to_frame(frame, exception)
        self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
        self.do_wait_suspend(thread, frame, 'exception', None, "trace")
    except:
        pydev_log.error("We've got an error while stopping in post-mortem: %s\n" % sys.exc_info()[0])
    finally:
        pydevd_vars.remove_additional_frame_by_id(thread_id)
def set_trace_for_frame_and_parents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None):
    """Install *dispatch_func* (default: this debugger's trace dispatch) on
    *frame* (optionally) and on every frame up its ``f_back`` chain."""
    if dispatch_func is None:
        dispatch_func = self.trace_dispatch
    current = frame if also_add_to_passed_frame else frame.f_back
    while current is not None:
        self.update_trace(current, dispatch_func, overwrite_prev_trace)
        current = current.f_back
def update_trace(self, frame, dispatch_func, overwrite_prev):
    """Set *dispatch_func* as the local trace function of *frame*.

    An existing trace function is kept unless *overwrite_prev* is true,
    except that a leftover ``trace_exception`` tracer is redirected back to
    its owning debugger's regular ``trace_dispatch``.
    """
    if frame.f_trace is None or overwrite_prev:
        frame.f_trace = dispatch_func
    else:
        try:
            # If it's the trace_exception, go back to the frame trace dispatch!
            # (im_func/im_self only exist on Py2 bound methods.)
            if frame.f_trace.im_func.__name__ == 'trace_exception':
                frame.f_trace = frame.f_trace.im_self.trace_dispatch
        except AttributeError:
            # Py3 tracer or a non-method tracer: leave it untouched.
            pass
    # Fix: the original ended with `frame = frame.f_back` followed by
    # `del frame`, which only rebinds/deletes the local name -- dead code,
    # removed.
def prepare_to_run(self):
    ''' Shared code to prepare debugging by installing traces and registering threads '''
    if self.signature_factory is not None or self.thread_analyser is not None:
        # Concurrency visualization / signature collection needs all data to
        # reach the IDE even after the program finishes, and cannot use frame
        # evaluation.
        CheckOutputThread(self).start()
        self.frame_eval_func = None
    self.patch_threads()
    pydevd_tracing.SetTrace(self.trace_dispatch, self.frame_eval_func, self.dummy_trace_dispatch)
    # When frame evaluation is available it already applies to every thread,
    # so no per-thread tracing setup is needed here.
    PyDBCommandThread(self).start()
    if show_tracing_warning or show_frame_eval_warning:
        self.writer.add_command(self.cmd_factory.make_show_cython_warning_message())
def patch_threads(self):
    """Make future threads (and thread-module spawned ones) traceable."""
    try:
        import threading
        threading.settrace(self.trace_dispatch)  # for all future threads
    except:
        # threading.settrace is not available in Jython.
        pass
    from _pydev_bundle.pydev_monkey import patch_thread_modules
    patch_thread_modules()
def get_fullname(self, mod_name):
    """Resolve *mod_name* to its source filename via its loader, or None."""
    if IS_PY3K:
        import pkgutil
    else:
        from _pydev_imps import _pydev_pkgutil_old as pkgutil
    try:
        loader = pkgutil.get_loader(mod_name)
    except:
        return None
    if loader is None:
        return None
    # Loaders expose get_filename or _get_filename depending on their age.
    for attr in ("get_filename", "_get_filename"):
        method = getattr(loader, attr, None)
        if method is not None:
            return method(mod_name)
    return None
def run(self, file, globals=None, locals=None, is_module=False, set_trace=True):
# Launch the target script (or module, for -m style runs) under the
# debugger and return the globals it ran with.
module_name = None
if is_module:
# "pkg.mod:func" means module plus an optional entry-point function.
file, _, entry_point_fn = file.partition(':')
module_name = file
filename = self.get_fullname(file)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
if set_trace:
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
# I think this is an ugly hack, bug it works (seems to) for the bug that says that sys.path should be the same in
# debug and run.
# NOTE(review): `m` is only bound when globals was None above; calling
# run() with explicit globals and set_trace=True would raise NameError
# here -- confirm callers never do that.
if m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
if not is_module:
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
# The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(file)[0])
while not self.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
if self.break_on_caught_exceptions or (self.plugin and self.plugin.has_exception_breaks()) or self.signature_factory:
# disable frame evaluation if there are exception breakpoints with 'On raise' activation policy
# or if there are plugin exception breakpoints or if collecting run-time types is enabled
self.frame_eval_func = None
# call prepare_to_run when we already have all information about breakpoints
self.prepare_to_run()
if self.thread_analyser is not None:
wrap_threads()
t = threadingCurrentThread()
self.thread_analyser.set_start_time(cur_time())
send_message("threading_event", 0, t.getName(), get_thread_id(t), "thread", "start", file, 1, None, parent=get_thread_id(t))
if self.asyncio_analyser is not None:
# we don't have main thread in asyncio graph, so we should add a fake event
send_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)
try:
if INTERACTIVE_MODE_AVAILABLE:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
traceback.print_exc()
if hasattr(sys, 'exc_clear'):
# we should clean exception information in Python 2, before user's code execution
sys.exc_clear()
if not is_module:
pydev_imports.execfile(file, globals, locals) # execute the script
else:
# treat ':' as a seperator between module and entry point function
# if there is no entry point we run we same as with -m switch. Otherwise we perform
# an import and execute the entry point
if entry_point_fn:
mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
func = getattr(mod, entry_point_fn)
func()
else:
# Run with the -m switch
import runpy
if hasattr(runpy, '_run_module_as_main'):
# Newer versions of Python actually use this when the -m switch is used.
if sys.version_info[:2] <= (2, 6):
runpy._run_module_as_main(module_name, set_argv0=False)
else:
runpy._run_module_as_main(module_name, alter_argv=False)
else:
runpy.run_module(module_name)
return globals
def exiting(self):
    """Flush output, forward redirected output, and send the exit message."""
    sys.stdout.flush()
    sys.stderr.flush()
    self.check_output_redirect()
    self.writer.add_command(self.cmd_factory.make_exit_message())
def wait_for_commands(self, globals):
    """Expose a fake 'Console' frame to the client and serve debugger
    commands forever (never returns)."""
    self._activate_mpl_if_needed()
    thread = threading.currentThread()
    from _pydevd_bundle import pydevd_frame_utils
    frame = pydevd_frame_utils.Frame(
        None, -1,
        pydevd_frame_utils.FCode("Console", os.path.abspath(os.path.dirname(__file__))),
        globals, globals)
    thread_id = get_thread_id(thread)
    from _pydevd_bundle import pydevd_vars
    pydevd_vars.add_additional_frame_by_id(thread_id, {id(frame): frame})
    self.writer.add_command(self.cmd_factory.make_show_console_message(thread_id, frame))
    while True:
        if self.mpl_in_use:
            # call input hooks if only matplotlib is in use
            self._call_mpl_hook()
        self.process_internal_commands()
        time.sleep(0.01)
# Class-level aliases binding the module-level dispatch / frame-evaluation
# helpers as attributes, so instances can reach them via self.<name>.
trace_dispatch = _trace_dispatch
frame_eval_func = frame_eval_func
dummy_trace_dispatch = dummy_trace_dispatch
enable_cache_frames_without_breaks = enable_cache_frames_without_breaks
def set_debug(setup):
    """Turn on verbose debugger self-tracing options in the *setup* dict."""
    setup.update({
        'DEBUG_RECORD_SOCKET_READS': True,
        'DEBUG_TRACE_BREAKPOINTS': 1,
        'DEBUG_TRACE_LEVEL': 3,
    })
def enable_qt_support(qt_support_mode):
"""Monkey-patch the selected Qt binding so its threads are debuggable."""
from _pydev_bundle import pydev_monkey_qt
pydev_monkey_qt.patch_qt(qt_support_mode)
def usage(doExit=0):
    """Print command-line usage and, when *doExit* is non-zero, exit with
    that status.

    Fix: the original always called ``sys.exit(0)`` even on error paths
    (e.g. ``usage(1)`` after a bad command line), so failures reported a
    success exit status; exit with the requested code instead.
    """
    sys.stdout.write('Usage:\n')
    sys.stdout.write('pydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
    if doExit:
        sys.exit(doExit)
def init_stdout_redirect():
    """Start teeing sys.stdout into a buffer for the debug server (idempotent)."""
    if not getattr(sys, 'stdoutBuf', None):
        sys.stdoutBuf = pydevd_io.IOBuf()
        sys.stdout_original = sys.stdout  # keep a handle to the real stream
        sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable
def init_stderr_redirect():
    """Start teeing sys.stderr into a buffer for the debug server (idempotent)."""
    if not getattr(sys, 'stderrBuf', None):
        sys.stderrBuf = pydevd_io.IOBuf()
        sys.stderr_original = sys.stderr  # keep a handle to the real stream
        sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable
def has_data_to_redirect():
    """Return True if either redirected output buffer holds unsent data."""
    for buf_name in ('stdoutBuf', 'stderrBuf'):
        buf = getattr(sys, buf_name, None)
        if buf and not buf.empty():
            return True
    return False
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
    host=None,
    stdoutToServer=False,
    stderrToServer=False,
    port=5678,
    suspend=True,
    trace_only_current_thread=False,
    overwrite_prev_trace=False,
    patch_multiprocessing=False,
):
    '''Sets the tracing function with the pydev debug function and initializes needed facilities.

    @param host: the user may specify another host, if the debug server is not in the same machine
        (default is the local host).
    @param stdoutToServer: when this is true, the stdout is passed to the debug server.
    @param stderrToServer: when this is true, the stderr is passed to the debug server
        so that they are printed in its console and not in this process console.
    @param port: specifies which port to use for communicating with the server (note that the
        server must be started in the same port). @note: currently it's hard-coded at 5678 in the client.
    @param suspend: whether a breakpoint should be emulated as soon as this function is called.
    @param trace_only_current_thread: determines if only the current thread will be traced or all
        current and future threads will also have the tracing enabled.
    @param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced.
    @param patch_multiprocessing: if True we'll patch the functions which create new processes so that
        launched processes are debugged.
    '''
    # Serialize concurrent settrace() calls; the real work happens in
    # _locked_settrace.
    with _set_trace_lock:
        _locked_settrace(
            host,
            stdoutToServer,
            stderrToServer,
            port,
            suspend,
            trace_only_current_thread,
            overwrite_prev_trace,
            patch_multiprocessing,
        )
_set_trace_lock = thread.allocate_lock()
def _locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
):
# Body of settrace(); always runs with _set_trace_lock held.
if patch_multiprocessing:
try:
from _pydev_bundle import pydev_monkey
except:
pass
else:
pydev_monkey.patch_new_process_functions()
global connected
global bufferStdOutToServer
global bufferStdErrToServer
if not connected:
# First call: create the debugger, connect, redirect output and install
# tracing for the current (and optionally all) threads.
pydevd_vm_type.setup_type()
if SetupHolder.setup is None:
setup = {
'client': host, # dispatch expects client to be set to the host address when server is False
'server': False,
'port': int(port),
'multiprocess': patch_multiprocessing,
}
SetupHolder.setup = setup
debugger = PyDB()
debugger.connect(host, port) # Note: connect can raise error.
# Mark connected only if it actually succeeded.
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
if bufferStdOutToServer:
init_stdout_redirect()
if bufferStdErrToServer:
init_stderr_redirect()
patch_stdin(debugger)
debugger.set_trace_for_frame_and_parents(get_frame(), False, overwrite_prev_trace=overwrite_prev_trace)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
debugger.set_trace_for_frame_and_parents(custom_frame.frame, False)
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
t = threadingCurrentThread()
try:
additional_info = t.additional_info
except AttributeError:
additional_info = PyDBAdditionalThreadInfo()
t.additional_info = additional_info
while not debugger.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
global forked
frame_eval_for_tracing = debugger.frame_eval_func
# NOTE(review): this checks the module-level `frame_eval_func`, not
# `debugger.frame_eval_func` -- presumably intentional (global capability
# flag), but worth confirming.
if frame_eval_func is not None and not forked:
# Disable frame evaluation for Remote Debug Server
frame_eval_for_tracing = None
# note that we do that through pydevd_tracing.SetTrace so that the tracing
# is not warned to the user!
pydevd_tracing.SetTrace(debugger.trace_dispatch, frame_eval_for_tracing, debugger.dummy_trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# As this is the first connection, also set tracing for any untraced threads
debugger.set_tracing_for_untraced_contexts(ignore_frame=get_frame(), overwrite_prev_trace=overwrite_prev_trace)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
PyDBCommandThread(debugger).start()
CheckOutputThread(debugger).start()
#Suspend as the last thing after all tracing is in place.
if suspend:
debugger.set_suspend(t, CMD_THREAD_SUSPEND)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
debugger = get_global_debugger()
debugger.set_trace_for_frame_and_parents(get_frame(), False)
t = threadingCurrentThread()
try:
additional_info = t.additional_info
except AttributeError:
additional_info = PyDBAdditionalThreadInfo()
t.additional_info = additional_info
pydevd_tracing.SetTrace(debugger.trace_dispatch, debugger.frame_eval_func, debugger.dummy_trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
if suspend:
debugger.set_suspend(t, CMD_THREAD_SUSPEND)
def stoptrace():
    """Undo settrace(): restore the tracing hooks and shut the debugger down."""
    global connected
    if not connected:
        return
    pydevd_tracing.restore_sys_set_trace_func()
    sys.settrace(None)
    try:
        threading.settrace(None)  # for all future threads (not available in Jython)
    except:
        pass
    from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
    undo_patch_thread_modules()
    debugger = get_global_debugger()
    if debugger:
        # Neutralize per-frame tracers before announcing the exit.
        debugger.set_trace_for_frame_and_parents(
            get_frame(), also_add_to_passed_frame=True, overwrite_prev_trace=True,
            dispatch_func=lambda *args: None)
        debugger.exiting()
        kill_all_pydev_threads()
    connected = False
class Dispatcher(object):
    """Short-lived client used to ask the IDE for the port assigned to a
    new debugged process."""

    def __init__(self):
        self.port = None  # filled in by connect()/DispatchReader

    def connect(self, host, port):
        self.host = host
        self.port = port
        self.client = start_client(self.host, self.port)
        self.reader = DispatchReader(self)
        # The reader runs inline in this thread, so tracing must stay on.
        self.reader.pydev_do_not_trace = False
        self.reader.run()

    def close(self):
        try:
            self.reader.do_kill_pydev_thread()
        except:
            pass
class DispatchReader(ReaderThread):
    """Reader that waits for a single port-assignment (99) command from the IDE."""

    def __init__(self, dispatcher):
        self.dispatcher = dispatcher
        ReaderThread.__init__(self, self.dispatcher.client)

    def _on_run(self):
        # This reader runs inline in the caller's thread, not as a daemon.
        dummy_thread = threading.currentThread()
        dummy_thread.is_pydev_daemon_thread = False
        return ReaderThread._on_run(self)

    def handle_except(self):
        ReaderThread.handle_except(self)

    def process_command(self, cmd_id, seq, text):
        if cmd_id == 99:
            # The IDE answered with the port for this new process.
            self.dispatcher.port = int(text)
            self.killReceived = True
# How dispatch() obtains the (host, port) for a newly created process.
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
    """Return the (host, port) a new debugged process should connect to.

    With the existing-connection approach (PyCharm) the IDE is asked for a
    fresh port; otherwise (PyDev) the original setup values are reused.
    """
    setup = SetupHolder.setup
    host, port = setup['client'], setup['port']
    if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
        dispatcher = Dispatcher()
        try:
            dispatcher.connect(host, port)
            port = dispatcher.port
        finally:
            dispatcher.close()
    return host, port
def settrace_forked():
    '''
    When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
    '''
    host, port = dispatch()
    from _pydevd_bundle import pydevd_tracing
    pydevd_tracing.restore_sys_set_trace_func()
    if port is None:
        return
    global connected, forked
    connected = False
    forked = True
    custom_frames_container_init()
    settrace(
        host,
        port=port,
        suspend=False,
        trace_only_current_thread=False,
        overwrite_prev_trace=True,
        patch_multiprocessing=True,
    )
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
    """Holds the command-line setup dict shared across the module."""
    # Dict parsed from the command line (or built by settrace); None until set.
    setup = None
def apply_debugger_options(setup_options):
    """
    :type setup_options: dict[str, bool]
    """
    options = {'save-signatures': False, 'qt-support': ''}
    options.update(setup_options)
    debugger = GetGlobalDebugger()
    if options['save-signatures']:
        if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
            sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
        else:
            # Only import it if we're going to use it!
            from _pydevd_bundle.pydevd_signature import SignatureFactory
            debugger.signature_factory = SignatureFactory()
    if options['qt-support']:
        enable_qt_support(options['qt-support'])
def patch_stdin(debugger):
"""Wrap sys.stdin so console reads are routed through the debugger."""
from _pydev_bundle.pydev_console_utils import DebugConsoleStdIn
orig_stdin = sys.stdin
sys.stdin = DebugConsoleStdIn(debugger, orig_stdin)
# Dispatch on_debugger_modules_loaded here, after all primary debugger modules are loaded
# (runs once at import time; lets third-party extensions hook into the debugger).
from _pydevd_bundle.pydevd_extension_api import DebuggerEventHandler
from _pydevd_bundle import pydevd_extension_utils
for handler in pydevd_extension_utils.extensions_of_type(DebuggerEventHandler):
handler.on_debugger_modules_loaded(debugger_version=__version__)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Command-line entry point: parse arguments, optionally dispatch for
# multiprocess mode, connect to the IDE and run the target file/module.
# parse the command line. --file is our last argument that is required
try:
from _pydevd_bundle.pydevd_command_line_handling import process_command_line
setup = process_command_line(sys.argv)
SetupHolder.setup = setup
except ValueError:
traceback.print_exc()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
fix_getpass.fix_getpass()
pydev_log.debug("Executing file %s" % setup['file'])
pydev_log.debug("arguments: %s"% str(sys.argv))
pydevd_vm_type.setup_type(setup.get('vm_type', None))
if os.getenv('PYCHARM_DEBUG') == 'True' or os.getenv('PYDEV_DEBUG') == 'True':
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
debugger = PyDB()
try:
from _pydev_bundle import pydev_monkey
except:
pass #Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
# PyCharm multiproc: ask the IDE (via the existing connection) for the
# port this new process should use.
pydev_log.debug("Started in multiproc mode\n")
global DISPATCH_APPROACH
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n" %port)
pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
else:
pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
finally:
dispatcher.close()
else:
pydev_log.info("pydev debugger: starting\n")
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
traceback.print_exc()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
# that if the user already had a stackless.set_schedule_callback installed, he'd loose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
from _pydevd_bundle import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
# It's ok not having stackless there...
try:
sys.exc_clear() # the exception information should be cleaned in Python 2
except:
pass
is_module = setup['module']
patch_stdin(debugger)
if fix_app_engine_debug:
# Google App Engine dev_appserver: inject ourselves via its startup-script
# hooks instead of tracing it directly.
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
if setup['save-threading']:
debugger.thread_analyser = ThreadingLogger()
if setup['save-asyncio']:
if IS_PY34_OR_GREATER:
debugger.asyncio_analyser = AsyncioLogger()
apply_debugger_options(setup)
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
global connected
connected = True # Mark that we're connected when started from inside ide.
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
# Standard script entry point.
if __name__ == '__main__':
main()
| apache-2.0 |
dpshelio/sunpy | examples/plotting/Finding_Local_Peaks_in_Solar_Data.py | 2 | 3152 | """
=================================
Finding Local Peaks in Solar Data
=================================
Detecting intensity peaks in solar images can be useful, for example as
a simple flare identification mechanism. This example illustrates detection
of areas where there is a spike in solar intensity.
We use the `~skimage.feature.peak_local_max` function in the scikit-image library
to find those regions in the map data where the intensity values form a local maxima.
Then we plot those peaks in the original AIA plot.
"""
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# NOTE(review): Axes3D appears unused by name; presumably imported for the
# side effect of registering the '3d' projection with matplotlib — confirm
# against the matplotlib version in use before removing.
from mpl_toolkits.mplot3d import Axes3D
from skimage.feature import peak_local_max
import sunpy.map
from sunpy.data.sample import AIA_193_IMAGE
###############################################################################
# We will first create a Map using some sample data and display it.
aiamap = sunpy.map.Map(AIA_193_IMAGE)
plt.figure()
aiamap.plot()
plt.colorbar()
###############################################################################
# Before we find regions of local maxima, we need to create some variables to
# store pixel coordinates for the 2D SDO/AIA data we are using.
# These variables are used for plotting in 3D later on.
x = np.arange(aiamap.data.shape[0])
y = np.arange(aiamap.data.shape[1])
X, Y = np.meshgrid(x, y)
#######################################################################################
# We will only consider peaks within the AIA data that have minimum intensity
# value equal to ``threshold_rel * max(Intensity)`` which is 20% of the maximum intensity.
# The next step is to calculate the pixel locations of local maxima
# positions where peaks are separated by at least ``min_distance = 60 pixels``.
# This function comes from scikit-image and its documentation is found
# at `~skimage.feature.peak_local_max`.
coordinates = peak_local_max(aiamap.data, min_distance=60, threshold_rel=0.2)
###############################################################################
# We now check for the indices at which we get such a local maxima and plot
# those positions marked red in the aiamap data.
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, aiamap.data)
ax.view_init(elev=39, azim=64)
# peak_local_max returns (row, col) pairs, i.e. (y, x) in pixel coordinates.
peaks_pos = aiamap.data[coordinates[:, 0], coordinates[:, 1]]
ax.scatter(coordinates[:, 1], coordinates[:, 0], peaks_pos, color='r')
ax.set_xlabel('X Coordinates')
ax.set_ylabel('Y Coordinates')
ax.set_zlabel('Intensity')
###############################################################################
# Now we need to turn the pixel coordinates into the world location so
# they can be easily overlaid on the Map.
hpc_max = aiamap.pixel_to_world(coordinates[:, 1]*u.pixel, coordinates[:, 0]*u.pixel)
###############################################################################
# Finally we do an AIA plot to check for the local maxima locations
# which will be marked with a blue `x` label.
fig = plt.figure()
ax = plt.subplot(projection=aiamap)
aiamap.plot()
ax.plot_coord(hpc_max, 'bx')
plt.show()
| bsd-2-clause |
jniediek/mne-python | mne/viz/tests/test_3d.py | 5 | 9195 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_raises, assert_equal
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate)
from mne.io import read_raw_ctf, read_raw_bti, read_raw_kit
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
plot_trans)
from mne.utils import requires_mayavi, requires_pysurfer, run_tests_if_main
from mne.datasets import testing
from mne.source_space import read_source_spaces
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg')  # for testing don't use X server
# Paths into the optional mne testing dataset.  With download=False,
# data_path returns the local cache location; presumably the decorated
# tests below are skipped when the dataset is absent — confirm against
# mne.datasets.testing.
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
                    'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
ctf_fname = op.join(data_dir, 'CTF', 'testdata_ctf.ds')
# Small fixture files shipped inside the mne source tree itself
# (mne/io/...), located relative to this test file.
io_dir = op.join(op.abspath(op.dirname(__file__)), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
base_dir = op.join(io_dir, 'bti', 'tests', 'data')  # rebound for BTi fixtures
pdf_fname = op.join(base_dir, 'test_pdf_linux')
config_fname = op.join(base_dir, 'test_config_linux')
hs_fname = op.join(base_dir, 'test_hs_linux')
sqd_fname = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
warnings.simplefilter('always')  # enable b/c these tests throw warnings
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_plot_sparse_source_estimates():
    """Test plotting of dense and sparse source estimates."""
    sample_src = read_source_spaces(src_fname)

    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    # Use floor division: np.random.rand expects an integer size, and
    # ``stc_size / 20`` is a float under Python 3 (TypeError).
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = 'mne_analyze'
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          background=(1, 1, 0),
                          subjects_dir=subjects_dir, colorbar=True,
                          clim='auto')
    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both', clim='auto',
                  subjects_dir=subjects_dir)

    # now do sparse version
    vertices = sample_src[0]['vertno']
    inds = [111, 333]
    stc_data = np.zeros((len(inds), n_time))
    stc_data[0, 1] = 1.
    stc_data[1, 4] = 2.
    # builtin ``int`` instead of the removed ``np.int`` alias
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                 opacity=0.5, high_resolution=False)
@testing.requires_testing_data
@requires_mayavi
def test_plot_evoked_field():
    """Test plotting evoked field maps for MEG (and all-channel) types."""
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # Decimate channels so the field-map computation stays fast.
    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
    for t in ['meg', None]:
        # make_field_map emits projector-related warnings on this fixture;
        # capture them so the test output stays clean.
        with warnings.catch_warnings(record=True):  # bad proj
            maps = make_field_map(evoked, trans_fname, subject='sample',
                                  subjects_dir=subjects_dir, n_jobs=1,
                                  ch_type=t)
        evoked.plot_field(maps, time=0.1)
@testing.requires_testing_data
@requires_mayavi
def test_plot_trans():
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    evoked = read_evokeds(evoked_fname)[0]
    with warnings.catch_warnings(record=True):  # 4D weight tables
        bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
                           preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        ref_meg = system != 'KIT'
        plot_trans(info, trans_fname, subject='sample', meg_sensors=True,
                   subjects_dir=subjects_dir, ref_meg=ref_meg)
    # KIT ref sensor coil def is defined
    plot_trans(infos['KIT'], None, meg_sensors=True, ref_meg=True)
    info = infos['Neuromag']
    assert_raises(ValueError, plot_trans, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir,
                  ch_type='bad-chtype')
    assert_raises(TypeError, plot_trans, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    # no-head version
    plot_trans(info, None, meg_sensors=True, dig=True, coord_frame='head')
    # EEG only with strange options
    with warnings.catch_warnings(record=True) as w:
        plot_trans(evoked.copy().pick_types(meg=False, eeg=True).info,
                   trans=trans_fname, meg_sensors=True)
    # A non-empty list is always truthy, so asserting the list itself
    # (as the original did) could never fail; use any() to actually
    # verify the expected warning was raised.
    assert_true(any('Cannot plot MEG' in str(ww.message) for ww in w))
@testing.requires_testing_data
@requires_pysurfer
@requires_mayavi
def test_limits_to_control_points():
    """Test functionality for determining colormap control points (clim)."""
    sample_src = read_source_spaces(src_fname)
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    # Seeded RNG so the data (and hence any clim='auto' behavior) is
    # reproducible across runs.
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    # A list of figures is rejected when clim='auto'
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)
    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)
    # Test handling of degenerate data: the warning counts below depend on
    # the exact order of the in-place fills and plot calls.
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir, time_unit='s')
        assert_equal(len(w), 0)
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir, time_unit='s')
        assert_equal(len(w), 0)
        # all-zero data is expected to emit exactly one warning
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir, time_unit='s')
        assert_equal(len(w), 1)
    mlab.close()
@testing.requires_testing_data
@requires_mayavi
def test_plot_dipole_locations():
    """Test plotting dipole locations."""
    head_trans = read_trans(trans_fname)
    dip_set = read_dipole(dip_fname)
    # A valid call must render; an unknown ``mode`` must raise.
    dip_set.plot_locations(head_trans, 'sample', subjects_dir, fig_name='foo')
    assert_raises(ValueError, dip_set.plot_locations, head_trans, 'sample',
                  subjects_dir, mode='foo')


run_tests_if_main()
| bsd-3-clause |
GGiecold/pyRMT | pyRMT.py | 1 | 26076 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Python for Random Matrix Theory. This package implements several
cleaning schemes for noisy correlation matrices, including
the optimal shrinkage, rotationally-invariant estimator
to an underlying correlation matrix (as proposed by Joel Bun,
Jean-Philippe Bouchaud, Marc Potters and colleagues).
Such cleaned correlation matrices are known to improve factor-decomposition
via Principal Component Analysis (PCA) and could be of relevance in a variety
of contexts, including computational biology.
Cleaning schemes also result in much improved out-of-sample risk
of Markowitz optimal portfolios, as established over the years
in several papers by Jean-Philippe Bouchaud, Marc Potters and collaborators.
Some cleaning schemes can be easily adapted from the various shrinkage
estimators implemented in the sklearn.covariance module
(see the various publications by O. Ledoit and M. Wolf listed below).
In addition, it might make sense to perform an empirical estimate
of a correlation matrix robust to outliers before proceeding with
the cleaning schemes of the present module. Some of those robust estimates
have been implemented in the sklearn.covariance module as well.
References
----------
* "DISTRIBUTION OF EIGENVALUES FOR SOME SETS OF RANDOM MATRICES",
V. A. Marcenko and L. A. Pastur
Mathematics of the USSR-Sbornik, Vol. 1 (4), pp 457-483
* "A well-conditioned estimator for large-dimensional covariance matrices",
O. Ledoit and M. Wolf
Journal of Multivariate Analysis, Vol. 88 (2), pp 365-411
* "Improved estimation of the covariance matrix of stock returns with "
"an application to portfolio selection",
O. Ledoit and M. Wolf
Journal of Empirical Finance, Vol. 10 (5), pp 603-621
* "Financial Applications of Random Matrix Theory: a short review",
J.-P. Bouchaud and M. Potters
arXiv: 0910.1205 [q-fin.ST]
* "Eigenvectors of some large sample covariance matrix ensembles",
O. Ledoit and S. Peche
Probability Theory and Related Fields, Vol. 151 (1), pp 233-264
* "NONLINEAR SHRINKAGE ESTIMATION OF LARGE-DIMENSIONAL COVARIANCE MATRICES",
O. Ledoit and M. Wolf
The Annals of Statistics, Vol. 40 (2), pp 1024-1060
* "Rotational invariant estimator for general noisy matrices",
J. Bun, R. Allez, J.-P. Bouchaud and M. Potters
arXiv: 1502.06736 [cond-mat.stat-mech]
* "Cleaning large Correlation Matrices: tools from Random Matrix Theory",
J. Bun, J.-P. Bouchaud and M. Potters
arXiv: 1610.08104 [cond-mat.stat-mech]
* "Direct Nonlinear Shrinkage Estimation of Large-Dimensional Covariance Matrices (September 2017)",
O. Ledoit and M. Wolf https://ssrn.com/abstract=3047302 or http://dx.doi.org/10.2139/ssrn.3047302
"""
from __future__ import division, print_function
from builtins import reversed
from builtins import map, zip
from collections import MutableSequence, Sequence
import copy
from math import ceil
from numbers import Complex, Integral, Real
import sys
import warnings
import numpy as np
import pandas as pd
from sklearn.covariance import EmpiricalCovariance
from sklearn.preprocessing import StandardScaler
__author__ = 'Gregory Giecold and Lionel Ouaknin'
__copyright__ = 'Copyright 2017-2022 Gregory Giecold and contributors'
__credit__ = 'Gregory Giecold and Lionel Ouaknin'
__status__ = 'beta'
__version__ = '0.1.0'
__all__ = ['clipped', 'directKernel', 'marcenkoPastur',
'optimalShrinkage', 'poolAdjacentViolators',
'stieltjes']
def checkDesignMatrix(X):
    """Validate a design matrix and normalize its orientation.

    Parameters
    ----------
    X: array-like of shape (T, N), where T denotes the number
       of samples and N the number of features.
       If T < N, a warning is issued to the user, and the transpose
       of X is considered instead.

    Returns
    -------
    T: type int
       Number of samples of the (possibly transposed) matrix.
    N: type int
       Number of features.
    transpose_flag: type bool
       Specify if the design matrix X should be transposed
       in view of having less rows than columns.

    Raises
    ------
    TypeError
        If X is not an array-like object.
    """
    # Accept numpy arrays, pandas containers and generic Python sequences.
    # collections.abc is used (rather than the aliases removed from
    # collections in Python 3.10) so the check keeps working everywhere.
    from collections.abc import MutableSequence, Sequence

    if not isinstance(X, (np.ndarray, pd.DataFrame, pd.Series,
                          MutableSequence, Sequence)):
        raise TypeError("checkDesignMatrix expects an array-like input; "
                        "got %r instead." % type(X))

    X = np.asarray(X, dtype=float)
    X = np.atleast_2d(X)

    if X.shape[0] < X.shape[1]:
        warnings.warn("The Marcenko-Pastur distribution pertains to "
                      "the empirical covariance matrix of a random matrix X "
                      "of shape (T, N). It is assumed that the number of "
                      "samples T is assumed higher than the number of "
                      "features N. The transpose of the matrix X submitted "
                      "at input will be considered in the cleaning schemes "
                      "for the corresponding correlation matrix.", UserWarning)
        T, N = reversed(X.shape)
        transpose_flag = True
    else:
        T, N = X.shape
        transpose_flag = False

    return T, N, transpose_flag
def marcenkoPastur(X):
    """Marcenko-Pastur support and density for the design matrix X.

    Parameter
    ---------
    X: random matrix of shape (T, N), with T the number of samples and
       N the number of features.  The variance of the entries of X is
       assumed to have been normalized to unity.

    Returns
    -------
    (lambda_min, lambda_max): type tuple
        Lower and upper edges of the support of the Marcenko-Pastur
        distribution associated with X.
    rho: type function
        The Marcenko-Pastur density; evaluates to 0.0 outside the
        open interval (lambda_min, lambda_max).

    Reference
    ---------
    "DISTRIBUTION OF EIGENVALUES FOR SOME SETS OF RANDOM MATRICES",
    V. A. Marcenko and L. A. Pastur
    Mathematics of the USSR-Sbornik, Vol. 1 (4), pp 457-483
    """
    T, N, _ = checkDesignMatrix(X)
    q = N / float(T)

    sqrt_q = np.sqrt(q)
    lambda_min = (1 - sqrt_q) ** 2
    lambda_max = (1 + sqrt_q) ** 2

    def rho(x):
        # Density vanishes identically outside the open support.
        if not lambda_min < x < lambda_max:
            return 0.0
        numerator = np.sqrt((lambda_max - x) * (x - lambda_min))
        return numerator / (2 * np.pi * q * x)

    return (lambda_min, lambda_max), rho
def clipped(X, alpha=None, return_covariance=False):
    """Eigenvalue-clipping estimator of a correlation/covariance matrix.

    Clips the eigenvalues of the empirical correlation matrix E by keeping
    the [N * alpha] top eigenvalues and shrinking the remaining ones to a
    common, trace-preserving constant (i.e. Tr(E_clipped) = Tr(E)).

    Parameters
    ----------
    X: design matrix, of shape (T, N), where T denotes the number
        of samples (think measurements in a time series), while N
        stands for the number of features (think of stock tickers).

    alpha: type float or derived from numbers.Real (default: None)
        Parameter between 0 and 1, inclusive, determining the fraction
        to keep of the top eigenvalues of the empirical correlation matrix.
        If left unspecified, alpha is chosen so as to keep all the
        empirical eigenvalues greater than the upper edge of the
        Marcenko-Pastur spectrum; eigenvalues inside the Marcenko-Pastur
        range are indistinguishable from those of a purely random matrix
        and are treated as noise.  (Finite-size, Tracy-Widom-type
        corrections to the edges are ignored.)

    return_covariance: type bool (default: False)
        If set to True, compute the standard deviations of each individual
        feature across observations, clean the underlying matrix
        of pairwise correlations, then re-apply the standard
        deviations and return a cleaned variance-covariance matrix.

    Returns
    -------
    E_clipped: type numpy.ndarray, shape (N, N)
        Cleaned estimator of the true correlation matrix C underlying
        the noisy, in-sample estimate E.  If return_covariance=True,
        E_clipped corresponds to a cleaned variance-covariance matrix.

    Raises
    ------
    ValueError
        If alpha is not a real number within [0, 1].
    TypeError
        If return_covariance is not a bool.

    Reference
    ---------
    "Financial Applications of Random Matrix Theory: a short review",
    J.-P. Bouchaud and M. Potters
    arXiv: 0910.1205 [q-fin.ST]
    """
    if alpha is not None:
        if not (isinstance(alpha, Real) and 0 <= alpha <= 1):
            raise ValueError("clipped: alpha must be a real number "
                             "within [0, 1].")
    if not isinstance(return_covariance, bool):
        raise TypeError("clipped: return_covariance must be a bool.")

    T, N, transpose_flag = checkDesignMatrix(X)
    if transpose_flag:
        X = X.T

    if not return_covariance:
        # Work directly on a correlation matrix by scaling each feature
        # to unit variance.
        X = StandardScaler(with_mean=False,
                           with_std=True).fit_transform(X)

    ec = EmpiricalCovariance(store_precision=False,
                             assume_centered=True)
    ec.fit(X)
    E = ec.covariance_

    if return_covariance:
        # Strip the standard deviations off E to clean a pure
        # correlation matrix; they are re-applied at the very end.
        inverse_std = 1./np.sqrt(np.diag(E))
        E *= inverse_std
        E *= inverse_std.reshape(-1, 1)

    eigvals, eigvecs = np.linalg.eigh(E)
    eigvecs = eigvecs.T

    if alpha is None:
        # Keep every eigenvalue above the Marcenko-Pastur upper edge.
        (lambda_min, lambda_max), _ = marcenkoPastur(X)
        xi_clipped = np.where(eigvals >= lambda_max, eigvals, np.nan)
    else:
        xi_clipped = np.full(N, np.nan)
        threshold = int(ceil(alpha * N))
        if threshold > 0:
            xi_clipped[-threshold:] = eigvals[-threshold:]

    # Shrink the noisy eigenvalues (NaN markers) to the trace-preserving
    # constant gamma.  Guard against division by zero when every
    # eigenvalue was kept (e.g. alpha == 1), which crashed the original.
    num_noisy = int(np.isnan(xi_clipped).sum())
    if num_noisy > 0:
        gamma = float(E.trace() - np.nansum(xi_clipped)) / num_noisy
        xi_clipped = np.where(np.isnan(xi_clipped), gamma, xi_clipped)

    # Rebuild the matrix from the clipped spectrum and the untouched
    # empirical eigenvectors.
    E_clipped = np.zeros((N, N), dtype=float)
    for xi, eigvec in zip(xi_clipped, eigvecs):
        eigvec = eigvec.reshape(-1, 1)
        E_clipped += xi * eigvec.dot(eigvec.T)

    # Renormalize so the cleaned matrix has unit diagonal.
    tmp = 1./np.sqrt(np.diag(E_clipped))
    E_clipped *= tmp
    E_clipped *= tmp.reshape(-1, 1)

    if return_covariance:
        std = 1./inverse_std
        E_clipped *= std
        E_clipped *= std.reshape(-1, 1)

    return E_clipped
def stieltjes(z, E):
    """Stieltjes transform of the square matrix E evaluated at z.

    Computes g(z) = Tr[(z*I - E)^{-1}] / N, the normalized trace of the
    resolvent of E.  The original implementation omitted the matrix
    inversion and returned Tr(z*I - E) / N = z - Tr(E)/N, which is not
    the Stieltjes transform and silently corrupted the downstream
    rotationally-invariant estimator (xiHelper/optimalShrinkage).

    Parameters
    ----------
    z: complex number
    E: square matrix (array-like)

    Returns
    -------
    A complex number, the Stieltjes transform (resolvent trace) of E at z.

    Raises
    ------
    TypeError
        If z is not complex-like or E is not an array-like object.
    ValueError
        If E is not square.

    Reference
    ---------
    "Financial Applications of Random Matrix Theory: a short review",
    J.-P. Bouchaud and M. Potters
    arXiv: 0910.1205 [q-fin.ST]
    """
    # collections.abc instead of the aliases removed from collections
    # in Python 3.10.
    from collections.abc import MutableSequence, Sequence

    if not isinstance(z, Complex):
        raise TypeError("stieltjes: z must be a (complex) number.")
    if not isinstance(E, (np.ndarray, pd.DataFrame,
                          MutableSequence, Sequence)):
        raise TypeError("stieltjes: E must be an array-like object.")

    E = np.asarray(E, dtype=float)
    E = np.atleast_2d(E)
    if E.shape[0] != E.shape[1]:
        raise ValueError("stieltjes: E must be a square matrix.")

    N = E.shape[0]
    resolvent = np.linalg.inv(z * np.eye(N, dtype=float) - E)
    return np.trace(resolvent) / N
def xiHelper(x, q, E):
    """Rotationally-invariant shrinkage of a single eigenvalue.

    Helper to optimalShrinkage: maps an eigenvalue x of the empirical
    correlation matrix E to a cleaned eigenvalue of the true correlation
    matrix, leaving the eigenvectors of E untouched (no prior knowledge
    of the true eigenvectors is assumed).

    Parameters
    ----------
    x: type derived from numbers.Real
        Typically an eigenvalue of E, but any floating-point number
        is accepted.
    q: type derived from numbers.Real
        The number parametrizing a Marcenko-Pastur spectrum.
    E: type numpy.ndarray
        Symmetric correlation matrix associated with the
        Marcenko-Pastur parameter q above.

    Returns
    -------
    xi: type float
        Cleaned eigenvalue of the underlying true correlation matrix.

    References
    ----------
    * "Rotational invariant estimator for general noisy matrices",
      J. Bun, R. Allez, J.-P. Bouchaud and M. Potters
      arXiv: 1502.06736 [cond-mat.stat-mech]
    * "Cleaning large Correlation Matrices: tools from Random Matrix Theory",
      J. Bun, J.-P. Bouchaud and M. Potters
      arXiv: 1610.08104 [cond-mat.stat-mech]
    """
    inputs_ok = (
        isinstance(x, Real)
        and isinstance(q, Real)
        and isinstance(E, np.ndarray)
        and E.shape[0] == E.shape[1]
        and np.allclose(E.transpose(1, 0), E)
    )
    if not inputs_ok:
        raise AssertionError

    dim = E.shape[0]
    # Evaluate the Stieltjes transform slightly off the real axis.
    z_shifted = x - 1j / np.sqrt(dim)
    s_val = stieltjes(z_shifted, E)

    return x / abs(1 - q + q * z_shifted * s_val) ** 2
def gammaHelper(x, q, N, lambda_N, inverse_wishart=False):
    """Debiasing correction factor for the rotationally-invariant estimator.

    For moderate N, the xiHelper estimator systematically under-estimates
    small eigenvalues.  This is heuristically corrected by computing

        xi_hat = xi_RIE * max(1, Gamma),

    where Gamma is the factor returned here.

    Parameters
    ----------
    x: type derived from numbers.Real
        Typically an eigenvalue of an empirical correlation matrix;
        any floating-point number is accepted.
    q: type derived from numbers.Real
        Parametrizes a Marcenko-Pastur spectrum.
    N: type derived from numbers.Integral
        Dimension of the correlation matrix being cleaned.
    lambda_N: type derived from numbers.Real
        Smallest eigenvalue of the empirical correlation matrix.
    inverse_wishart: type bool (default: False)
        Whether to apply the inverse-Wishart regularization to the
        denominator.

    Returns
    -------
    Gamma: type float
        Upward correction factor for the debiased estimator.

    Reference
    ---------
    "Cleaning large Correlation Matrices: tools from Random Matrix Theory",
    J. Bun, J.-P. Bouchaud and M. Potters
    arXiv: 1610.08104 [cond-mat.stat-mech]
    """
    for value, expected_kind in ((x, Real), (q, Real),
                                 (N, Integral), (lambda_N, Real)):
        if not isinstance(value, expected_kind):
            raise AssertionError

    sqrt_q = np.sqrt(q)
    z = x - 1j / np.sqrt(N)

    # Rescaled Marcenko-Pastur edge and variance implied by lambda_N.
    lambda_plus = (1 + sqrt_q) ** 2 / (1 - sqrt_q) ** 2 * lambda_N
    sigma_2 = lambda_N / (1 - sqrt_q) ** 2

    # Stieltjes transform of the rescaled Marcenko-Pastur density at z.
    gmp = z + sigma_2 * (q - 1) - np.sqrt((z - lambda_N) * (z - lambda_plus))
    gmp /= 2 * q * sigma_2 * z

    Gamma = sigma_2 * abs(1 - q + q * z * gmp) ** 2

    if inverse_wishart:
        kappa = 2 * lambda_N / ((1 - q - lambda_N) ** 2 - 4 * q * lambda_N)
        alpha_s = 1 / (1 + 2 * q * kappa)
        Gamma /= x / (1 + alpha_s * (x - 1.))
    else:
        Gamma /= x

    return Gamma
def optimalShrinkage(X, return_covariance=False, method='rie'):
    """Optimal shrinkage, rotationally-invariant estimator (RIE) of the
    true correlation matrix C underlying the noisy, in-sample estimate
    E = 1/T X * transpose(X) associated to a design matrix X of shape
    (T, N) (T measurements and N features).

    Computing the overlaps between sample and true eigenvectors was first
    explained by Ledoit and Peche; their procedure was extended by Bun,
    Bouchaud and Potters, who also correct for a systematic downward bias
    in small eigenvalues.  In addition, this function provides access to:
    - the finite-N regularization of the optimal RIE for small eigenvalues
      (section 8.1 of ref. 3), a.k.a. the inverse-Wishart (IW)
      regularization;
    - the direct kernel method of Ledoit and Wolf (ref. 4).

    Parameters
    ----------
    X: design matrix, of shape (T, N), where T denotes the number
        of samples (think measurements in a time series), while N
        stands for the number of features (think of stock tickers).

    return_covariance: type bool (default: False)
        If set to True, compute the standard deviations of each individual
        feature across observations, clean the underlying matrix
        of pairwise correlations, then re-apply the standard
        deviations and return a cleaned variance-covariance matrix.

    method: type string, optional (default="rie")
        - "rie": optimal shrinkage in the manner of Bun & al. with
          no regularization;
        - "iw": optimal shrinkage with the Inverse Wishart regularization;
        - "kernel": direct kernel method of Ledoit and Wolf.

    Returns
    -------
    E_RIE: type numpy.ndarray, shape (N, N)
        Cleaned estimator of the true correlation matrix C (or, if
        return_covariance=True, of the variance-covariance matrix).

    Raises
    ------
    TypeError
        If return_covariance is not a bool.

    References
    ----------
    1 "Eigenvectors of some large sample covariance matrix ensembles",
      O. Ledoit and S. Peche
      Probability Theory and Related Fields, Vol. 151 (1), pp 233-264
    2 "Rotational invariant estimator for general noisy matrices",
      J. Bun, R. Allez, J.-P. Bouchaud and M. Potters
      arXiv: 1502.06736 [cond-mat.stat-mech]
    3 "Cleaning large Correlation Matrices: tools from Random Matrix Theory",
      J. Bun, J.-P. Bouchaud and M. Potters
      arXiv: 1610.08104 [cond-mat.stat-mech]
    4 "Direct Nonlinear Shrinkage Estimation of Large-Dimensional
      Covariance Matrices (September 2017)", O. Ledoit and M. Wolf
      https://ssrn.com/abstract=3047302
    """
    if not isinstance(return_covariance, bool):
        raise TypeError("optimalShrinkage: return_covariance must "
                        "be a bool.")

    T, N, transpose_flag = checkDesignMatrix(X)
    if transpose_flag:
        X = X.T

    if not return_covariance:
        X = StandardScaler(with_mean=False,
                           with_std=True).fit_transform(X)

    ec = EmpiricalCovariance(store_precision=False,
                             assume_centered=True)
    ec.fit(X)
    E = ec.covariance_

    if return_covariance:
        # Reduce E to a correlation matrix; the standard deviations are
        # re-applied at the very end.
        inverse_std = 1./np.sqrt(np.diag(E))
        E *= inverse_std
        E *= inverse_std.reshape(-1, 1)

    eigvals, eigvecs = np.linalg.eigh(E)
    eigvecs = eigvecs.T

    q = N / float(T)
    # np.linalg.eigh returns eigenvalues in ascending order, so the
    # smallest empirical eigenvalue sits at index 0.
    lambda_N = eigvals[0]

    # NB: value comparison, not identity ('is not') as in the original,
    # which only worked by accident of string interning.
    if method != 'kernel':
        use_inverse_wishart = (method == 'iw')
        xis = [xiHelper(x, q, E) for x in eigvals]
        Gammas = [gammaHelper(x, q, N, lambda_N,
                              inverse_wishart=use_inverse_wishart)
                  for x in eigvals]
        # Debias the small eigenvalues: xi_hat = xi_RIE * max(1, Gamma).
        lambda_hats = [xi * Gamma if Gamma > 1 else xi
                       for xi, Gamma in zip(xis, Gammas)]
    else:
        lambda_hats = directKernel(q, T, N, eigvals)

    # Rebuild the matrix from the cleaned spectrum and the untouched
    # empirical eigenvectors.
    E_RIE = np.zeros((N, N), dtype=float)
    for lambda_hat, eigvec in zip(lambda_hats, eigvecs):
        eigvec = eigvec.reshape(-1, 1)
        E_RIE += lambda_hat * eigvec.dot(eigvec.T)

    # Renormalize to unit diagonal.
    tmp = 1./np.sqrt(np.diag(E_RIE))
    E_RIE *= tmp
    E_RIE *= tmp.reshape(-1, 1)

    if return_covariance:
        std = 1./inverse_std
        E_RIE *= std
        E_RIE *= std.reshape(-1, 1)

    return E_RIE
def directKernel(q, T, N, eigvals):
    """Direct-kernel nonlinear shrinkage of covariance eigenvalues.

    Computes a nonlinear shrinkage estimator of the eigenvalues based on
    a kernel estimate of their spectral distribution and of its Hilbert
    transform, extending Ledoit & Peche (2011).  This is a port of the
    Matlab code by O. Ledoit and M. Wolf; the final isotonic-regression
    step uses the Pool Adjacent Violators (PAV) implementation below.

    Parameters
    ----------
    q: type derived from numbers.Real
        Ratio N/T.
    T: type derived from numbers.Integral
        Number of samples.
    N: type derived from numbers.Integral
        Dimension of the correlation matrix.
    eigvals: vector of the covariance matrix eigenvalues, in
        ascending order.

    Returns
    -------
    d_hats: a vector of cleaned eigenvalue estimates.

    References
    ----------
    * "Eigenvectors of some large sample covariance matrix ensembles",
      O. Ledoit and S. Peche (2011)
    * "Direct Nonlinear Shrinkage Estimation of Large-Dimensional
      Covariance Matrices (September 2017)", O. Ledoit and M. Wolf
      https://ssrn.com/abstract=3047302
    """
    # When N > T the first N - T sample eigenvalues are exactly zero and
    # carry no information; drop them.  (The trailing '.T' of the original
    # was a no-op on a 1-D array and has been removed.)
    lambdas = eigvals[max(0, N - T):]

    h = np.power(T, -0.35)  # Equation (5.4)
    h_squared = h ** 2

    # L[i, j] = lambdas[i].  np.tile replaces np.matlib.repmat, which
    # required an 'import numpy.matlib' this module never performed
    # (AttributeError at runtime in the original).
    L = np.tile(lambdas, (N, 1)).T
    Lt = L.transpose()
    square_Lt = h_squared * (Lt ** 2)
    zeros = np.zeros((N, N))

    # Kernel estimate of the spectral density... Equation (5.2)
    tmp = np.sqrt(np.maximum(4 * square_Lt - (L - Lt) ** 2, zeros)) / (2 * np.pi * square_Lt)
    f_tilde = np.mean(tmp, axis=0)

    # ...and of its Hilbert transform.  Equation (5.3)
    tmp = np.sign(L - Lt) * np.sqrt(np.maximum((L - Lt) ** 2 - 4 * square_Lt, zeros)) - L + Lt
    tmp /= 2 * np.pi * square_Lt
    Hf_tilde = np.mean(tmp, axis=1)

    if N <= T:
        tmp = (np.pi * q * lambdas * f_tilde) ** 2
        tmp += (1 - q - np.pi * q * lambdas * Hf_tilde) ** 2
        d_tilde = lambdas / tmp  # Equation (4.3)
    else:
        # NOTE(review): this branch appears unreachable from
        # optimalShrinkage (checkDesignMatrix enforces T >= N), and the
        # (N, len(lambdas)) array shapes above look inconsistent for
        # N > T — verify against the original Matlab code before relying
        # on it.  The original Python here did not even parse at runtime
        # (np.ones(N - T, 1, np.float) and a malformed np.concatenate);
        # fixed to the intended concatenation.
        Hf_tilde_0 = (1 - np.sqrt(1 - 4 * h_squared)) / (2 * np.pi * h_squared) * np.mean(1. / lambdas)  # Equation (C.8)
        d_tilde_0 = 1 / (np.pi * (N - T) / T * Hf_tilde_0)  # Equation (C.5)
        d_tilde_1 = lambdas / ((np.pi ** 2) * (lambdas ** 2) * (f_tilde ** 2 + Hf_tilde ** 2))  # Equation (C.4)
        # The N - T null sample eigenvalues all map to the same estimate.
        d_tilde = np.concatenate((np.full(N - T, d_tilde_0), d_tilde_1))

    d_hats = poolAdjacentViolators(d_tilde)  # Equation (4.5)

    return d_hats
# Author : Alexandre Gramfort
# license : BSD
def poolAdjacentViolators(y):
    """Monotonic (isotonic) smoothing of y via Pool Adjacent Violators.

    Repeatedly pools the first decreasing adjacent pair (together with the
    level set each side already belongs to) and replaces the pooled block
    by its mean, until the sequence is non-decreasing.

    Translated from matlab by Sean Collins (2006), and part of the EMAP toolbox.
    """
    y = np.asarray(y)
    if y.ndim != 1:
        raise AssertionError

    n_samples = len(y)
    v = y.copy()
    lvls = np.arange(n_samples)
    # lvlsets[i] = [start, last] of the level set currently containing i.
    lvlsets = np.c_[lvls, lvls]

    while True:
        deriv = np.diff(v)
        violators = np.where(deriv < 0)[0]
        if violators.size == 0:
            # Sequence is non-decreasing: done.
            break
        first = violators[0]
        start = lvlsets[first, 0]
        last = lvlsets[first + 1, 1]
        # Pool the block [start, last] to its mean value.
        block_mean = v[start:last + 1].sum() / (last - start + 1)
        v[start:last + 1] = block_mean
        lvlsets[start:last + 1, 0] = start
        lvlsets[start:last + 1, 1] = last

    return v
if __name__ == '__main__':
    # Module is import-only; there is no demo or CLI entry point.
    pass
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1, 2.0], [0, 0], (1+1j, 3.)])  # note mix of tuple and lists, and types
>>> x
array([[ 1.+0.j,  2.+0.j], [ 0.+0.j,  0.+0.j], [ 1.+1.j,  3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
| apache-2.0 |
costypetrisor/scikit-learn | sklearn/learning_curve.py | 13 | 13351 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0):
    """Learning curve.

    Determines cross-validated training and test scores for different training
    set sizes.

    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.

    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.

    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <example_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators
    # (convert boolean masks to integer index arrays so the slicing by
    # train[:n_train_samples] below is meaningful).
    if cv[0][0].dtype == bool:
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv
    # Sizes are validated against the first fold's training-set length.
    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        classes = np.unique(y) if is_classifier(estimator) else None
        # One job per fold; each job already returns a (n_ticks, 2) array.
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # One job per (fold, tick); the inner loop over ticks runs fastest,
        # which the reshape below relies on.
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True)
            for train, test in cv for n_train_samples in train_sizes_abs)
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # (folds, ticks, 2 scores) -> (2, ticks, folds): row 0 train, row 1 test.
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores."""
    scores_train = []
    scores_test = []
    # Each chunk holds only the samples *added* at that tick; splitting at
    # the tick boundaries yields one trailing remainder that is dropped.
    fresh_chunks = np.split(train, train_sizes)[:-1]
    for size, fresh in zip(train_sizes, fresh_chunks):
        seen = train[:size]
        X_seen, y_seen = _safe_split(estimator, X, y, seen)
        X_fresh, y_fresh = _safe_split(estimator, X, y, fresh)
        X_eval, y_eval = _safe_split(estimator, X, y, test, seen)
        if y_fresh is None:
            estimator.partial_fit(X_fresh, classes=classes)
        else:
            estimator.partial_fit(X_fresh, y_fresh, classes=classes)
        scores_train.append(_score(estimator, X_seen, y_seen, scorer))
        scores_test.append(_score(estimator, X_eval, y_eval, scorer))
    return np.array((scores_train, scores_test)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.

    Determine training and test scores for varying parameter values.

    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    param_name : string
        Name of the parameter that will be varied.

    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.

    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <example_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One fit per (fold, parameter value); the inner loop over param_range
    # runs fastest, which the reshape below relies on.
    out = parallel(delayed(_fit_and_score)(
        estimator, X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv for v in param_range)
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    # (folds, params, 2 scores) -> (2, params, folds): row 0 train, row 1 test.
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
# Toy 2-feature, 3-sample dataset shared across tests, plus a sparse copy.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
# Two labelings of the same 3 samples: binary (Y1) and 3-class (Y2).
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
# Iris dataset, loaded once at import time for all tests.
iris = load_iris()
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify it reproduces the labels exactly."""
    n_samples = len(y)
    labels = np.unique(y)
    preds = clf.fit(X, y).predict(X)
    # The classifier must discover exactly the label set and recover y.
    assert_array_equal(clf.classes_, labels)
    assert_equal(preds.shape, (n_samples,))
    assert_array_equal(preds, y)
    # Probabilities must be a proper distribution whose argmax matches y.
    probas = clf.predict_proba(X)
    assert_equal(probas.shape, (n_samples, labels.shape[0]))
    assert_array_almost_equal(probas.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probas.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset: the default model, a
    # weakly regularized one, and one without intercept must all predict
    # the correct result on dense and sparse input alike.
    for params in ({}, {"C": 100}, {"fit_intercept": False}):
        for data in (X, X_sp):
            check_predictions(LogisticRegression(random_state=0, **params),
                              data, Y1)
def test_error():
    # Test for appropriate exception on errors
    msg = "Penalty term must be positive"
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C=-1).fit, X, Y1)
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C="test").fit, X, Y1)
    # Both the plain and the CV estimator share the same validation of
    # tol and max_iter (rejecting negatives and non-numeric values).
    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
        assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
        msg = "Maximum number of iteration must be positive"
        assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
    # The 3-class toy problem must be solved for dense and sparse input.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape
    # String labels exercise the label-handling path as well.
    target = iris.target_names[iris.target]
    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial')]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)
        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)
        # predict_proba must be a normalized distribution whose argmax
        # agrees with predict to the same accuracy.
        probabilities = clf.predict_proba(iris.data)
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))
        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    # A negative penalty C must be rejected by both multinomial solvers.
    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(C=-1, solver=solver,
                                 multi_class='multinomial')
        assert_raises(ValueError, clf.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
    # Validation of solver / multi_class / penalty / dual combinations is
    # shared by LogisticRegression and LogisticRegressionCV.
    X, y = iris.data, iris.target
    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = ("Logistic Regression supports only liblinear, newton-cg and"
               " lbfgs solvers, got wrong_name")
        lr = LR(solver="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)
        msg = "multi_class should be either multinomial or ovr, got wrong_name"
        lr = LR(solver='newton-cg', multi_class="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solver except 'newton-cg' and 'lfbgs'
        for solver in ['liblinear']:
            msg = ("Solver %s does not support a multinomial backend." %
                   solver)
            lr = LR(solver=solver, multi_class='multinomial')
            assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'liblinear'
        for solver in ['newton-cg', 'lbfgs']:
            msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
                   solver)
            lr = LR(solver=solver, penalty='l1')
            assert_raise_message(ValueError, msg, lr.fit, X, y)
            msg = ("Solver %s supports only dual=False, got dual=True" %
                   solver)
            lr = LR(solver=solver, dual=True)
            assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]
    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)
        # In the binary case the multinomial model collapses to a single
        # coefficient row / intercept.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)
        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        # Argmax over predict_log_proba must reproduce the labels well.
        pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    # Test sparsify and densify members.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    # pred_<coef>_<data>: d = dense, s = sparse.
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    pred_d_s = clf.decision_function(sp_data)
    # All four dense/sparse (coef, data) combinations must agree.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)
    # Wrong dimensions for training data: 5-sample X_ vs 4-sample target.
    # (The original used the module-level 3-sample X here, which also
    # raised ValueError, but for an unrelated 3-vs-4 mismatch.)
    y_wrong = y_[:-1]
    assert_raises(ValueError, clf.fit, X_, y_wrong)
    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    # Test that we can write to coef_ and intercept_
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # Zeroing both parameter arrays must drive the decision function to 0.
    clf.intercept_[:] = 0
    clf.coef_[:] = 0
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    # The @raises decorator asserts that fit raises ValueError on NaN input.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)
    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
    # test for fit_intercept=True
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        Cs = [1e3]
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
        # A huge intercept_scaling makes liblinear's intercept penalty
        # negligible so its solution is comparable to the path's.
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20)
    # max_iter=1 with a tiny tol keeps the solution dependent on the seed.
    lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr2.fit(X, y)
    lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
    lr3.fit(X, y)
    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    # Both dense and sparse inputs must yield the same loss/gradient.
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)
        # First check that our derivation of the grad is correct
        # (compare against a finite-difference approximation of the loss).
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)
        # Second check that our intercept implementation is good
        # (an extra trailing weight acts as the intercept; at w = 0 the
        # loss must match the no-intercept case).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)
        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)
        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)
        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
        # Second check that our intercept implementation is good
        # (loss/grad with an intercept weight must agree across helpers).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
        grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(loss_interp, loss_interp_2)
        assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # With a single C, CV selection must reduce to the plain estimator.
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)
    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)
    # Shapes: (n_classes reduced to 1, n_folds=3, n_Cs=1, n_features).
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    # CV fit on a sparse matrix must match the fit on its dense equivalent.
    dense, y = make_classification(n_samples=50, n_features=5,
                                   random_state=0)
    dense[dense < 1.0] = 0.0
    sparse_data = sp.csr_matrix(dense)
    clf_dense = LogisticRegressionCV(fit_intercept=True)
    clf_dense.fit(dense, y)
    clf_sparse = LogisticRegressionCV(fit_intercept=True)
    clf_sparse.fit(sparse_data, y)
    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_)
    assert_array_almost_equal(clf_sparse.intercept_, clf_dense.intercept_)
    assert_equal(clf_sparse.C_, clf_dense.C_)
def test_intercept_logistic_helper():
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)
    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)
    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
    # Check Hessian-vector products along a random direction: they must
    # agree on the feature coordinates and differ only by the penalty
    # contribution on the intercept coordinate.
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test the liblinear fails when class_weight of type dict is
# provided, when it is multiclass. However it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/indexes/test_datetimelike.py | 7 | 52802 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.tslib import Timestamp, OutOfBoundsDatetime
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# different tz coerces tz-naive to tz-awareIndex(dtype=object)
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with tm.assertRaises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returing NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
    # GH 11343
    for tz in ['US/Eastern', 'Asia/Tokyo']:
        idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
                                '2011-01-01 11:00'])

        # Naive fill value on a naive index keeps datetime64 dtype.
        exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
                                '2011-01-01 11:00'])
        self.assert_index_equal(
            idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)

        # tz mismatch: fill value with tz coerces to object Index.
        exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
                        pd.Timestamp('2011-01-01 10:00', tz=tz),
                        pd.Timestamp('2011-01-01 11:00')], dtype=object)
        self.assert_index_equal(
            idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)

        # object fill value coerces to object Index.
        exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
                        pd.Timestamp('2011-01-01 11:00')], dtype=object)
        self.assert_index_equal(idx.fillna('x'), exp)

        # tz-aware index: matching tz fill value preserves dtype.
        idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
                                '2011-01-01 11:00'], tz=tz)

        exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
                                '2011-01-01 11:00'], tz=tz)
        self.assert_index_equal(
            idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)

        # Naive fill value on a tz-aware index coerces to object.
        exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
                        pd.Timestamp('2011-01-01 10:00'),
                        pd.Timestamp('2011-01-01 11:00', tz=tz)],
                       dtype=object)
        self.assert_index_equal(
            idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)

        # object fill value again coerces to object Index.
        exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
                        'x',
                        pd.Timestamp('2011-01-01 11:00', tz=tz)],
                       dtype=object)
        self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_of_union(self):
    # GH14323: Test taking the union of differences of an Index.
    # Difference of DatetimeIndex does not preserve frequency,
    # so a differencing operation should not retain the freq field of the
    # original index.
    i = pd.date_range("20160920", "20160925", freq="D")

    a = pd.date_range("20160921", "20160924", freq="D")
    expected = pd.DatetimeIndex(["20160920", "20160925"], freq=None)
    a_diff = i.difference(a)
    tm.assert_index_equal(a_diff, expected)
    tm.assert_attr_equal('freq', a_diff, expected)

    b = pd.date_range("20160922", "20160925", freq="D")
    b_diff = i.difference(b)
    expected = pd.DatetimeIndex(["20160920", "20160921"], freq=None)
    tm.assert_index_equal(b_diff, expected)
    tm.assert_attr_equal('freq', b_diff, expected)

    # The union of two freq-less differences must itself be freq-less.
    union_of_diff = a_diff.union(b_diff)
    expected = pd.DatetimeIndex(["20160920", "20160921", "20160925"],
                                freq=None)
    tm.assert_index_equal(union_of_diff, expected)
    tm.assert_attr_equal('freq', union_of_diff, expected)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
    """PeriodIndex-specific cases of the shared datetime-like tests."""
    _holder = PeriodIndex
    _multiprocess_can_split_ = True

    def setUp(self):
        self.indices = dict(index=tm.makePeriodIndex(10))
        self.setup_indices()

    def create_index(self):
        # Five consecutive daily periods starting 2013-01-01.
        return period_range('20130101', periods=5, freq='D')

    def test_construction_base_constructor(self):
        # GH 13664: pd.Index should infer PeriodIndex for period data.
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))

        arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))

        # Mixed frequencies cannot form a PeriodIndex -> object Index.
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='D')]
        tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.Index(np.array(arr), dtype=object))

    def test_astype(self):
        # GH 13149, GH 13209
        idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')

        result = idx.astype(object)
        expected = Index([Period('2016-05-16', freq='D')] +
                         [Period(NaT, freq='D')] * 3, dtype='object')
        tm.assert_index_equal(result, expected)

        # NaT maps to the iNaT sentinel when cast to int.
        result = idx.astype(int)
        expected = Int64Index([16937] + [-9223372036854775808] * 3,
                              dtype=np.int64)
        tm.assert_index_equal(result, expected)

        idx = period_range('1990', '2009', freq='A')
        result = idx.astype('i8')
        self.assert_index_equal(result, Index(idx.asi8))
        self.assert_numpy_array_equal(result.values, idx.asi8)

    def test_astype_raises(self):
        # GH 13149, GH 13209: unsupported casts must raise.
        idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
        self.assertRaises(ValueError, idx.astype, str)
        self.assertRaises(ValueError, idx.astype, float)
        self.assertRaises(ValueError, idx.astype, 'timedelta64')
        self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')

    def test_shift(self):
        # GH 8083: shift moves the index by whole periods.
        drange = self.create_index()
        result = drange.shift(1)
        expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
                                '2013-01-05', '2013-01-06'], freq='D')
        self.assert_index_equal(result, expected)

    def test_pickle_compat_construction(self):
        # PeriodIndex construction requires a freq; base-class pickle
        # compatibility check does not apply.
        pass

    def test_get_loc(self):
        idx = pd.period_range('2000-01-01', periods=3)

        # Exact hits via Period, Timestamp, datetime and string inputs.
        for method in [None, 'pad', 'backfill', 'nearest']:
            self.assertEqual(idx.get_loc(idx[1], method), 1)
            self.assertEqual(
                idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
            self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
            self.assertEqual(
                idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
            self.assertEqual(idx.get_loc(str(idx[1]), method), 1)

        # Tolerance accepted in several equivalent representations.
        idx = pd.period_range('2000-01-01', periods=5)[::2]
        self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
                                     tolerance='1 day'), 1)
        self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
                                     tolerance=pd.Timedelta('1D')), 1)
        self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
                                     tolerance=np.timedelta64(1, 'D')), 1)
        self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
                                     tolerance=timedelta(1)), 1)
        with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
            idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
        msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
        with tm.assertRaisesRegexp(ValueError, msg):
            idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
        with tm.assertRaises(KeyError):
            idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')

    def test_where(self):
        i = self.create_index()
        # All-true condition returns the index unchanged.
        result = i.where(notnull(i))
        expected = i
        tm.assert_index_equal(result, expected)

        # Condition with leading NaT masks the first two entries.
        i2 = i.copy()
        i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
                            freq='D')
        result = i.where(notnull(i2))
        expected = i2
        tm.assert_index_equal(result, expected)

    def test_where_other(self):
        i = self.create_index()
        # BUGFIX: the loop variable was previously unused ('other' was
        # hard-coded to np.nan), so the pd.NaT case was never exercised.
        # With an all-valid mask the result is the index either way.
        for arr in [np.nan, pd.NaT]:
            result = i.where(notnull(i), other=arr)
            expected = i
            tm.assert_index_equal(result, expected)

        # 'other' given as a PeriodIndex.
        i2 = i.copy()
        i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
                            freq='D')
        result = i.where(notnull(i2), i2)
        tm.assert_index_equal(result, i2)

        # 'other' given as a raw ndarray of periods.
        i2 = i.copy()
        i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
                            freq='D')
        result = i.where(notnull(i2), i2.values)
        tm.assert_index_equal(result, i2)

    def test_get_indexer(self):
        # Hourly-start periods; targets straddle the bin edges.
        idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
        tm.assert_numpy_array_equal(idx.get_indexer(idx),
                                    np.array([0, 1, 2], dtype=np.intp))

        target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
                                 '2000-01-02T01'], freq='H')
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
                                    np.array([-1, 0, 1], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
                                    np.array([0, 1, 2], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
                                    np.array([0, 1, 1], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
                                                    tolerance='1 hour'),
                                    np.array([0, -1, 1], dtype=np.intp))

        # Tolerance with a finer freq than the index must raise.
        msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
        with self.assertRaisesRegexp(ValueError, msg):
            idx.get_indexer(target, 'nearest', tolerance='1 minute')

        tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
                                                    tolerance='1 day'),
                                    np.array([0, 1, 1], dtype=np.intp))

    def test_repeat(self):
        # GH 10183: repeat preserves the freq.
        idx = pd.period_range('2000-01-01', periods=3, freq='D')
        res = idx.repeat(3)
        exp = PeriodIndex(idx.values.repeat(3), freq='D')
        self.assert_index_equal(res, exp)
        self.assertEqual(res.freqstr, 'D')

    def test_period_index_indexer(self):
        # GH 4125: a frame can be indexed by its own PeriodIndex.
        idx = pd.period_range('2002-01', '2003-12', freq='M')
        df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
        self.assert_frame_equal(df, df.ix[idx])
        self.assert_frame_equal(df, df.ix[list(idx)])
        self.assert_frame_equal(df, df.loc[list(idx)])
        self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
        self.assert_frame_equal(df, df.loc[list(idx)])

    def test_fillna_period(self):
        # GH 11343
        idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
                              '2011-01-01 11:00'], freq='H')

        # Same-freq fill value keeps the PeriodIndex dtype.
        exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
                              '2011-01-01 11:00'], freq='H')
        self.assert_index_equal(
            idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)

        # Object fill value coerces to object Index.
        exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
                        pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
        self.assert_index_equal(idx.fillna('x'), exp)

        # A Period of a different freq also coerces to object Index.
        exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
                        pd.Period('2011-01-01', freq='D'),
                        pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
        self.assert_index_equal(idx.fillna(pd.Period('2011-01-01', freq='D')),
                                exp)

    def test_no_millisecond_field(self):
        # DatetimeIndex deliberately has no 'millisecond' accessor.
        with self.assertRaises(AttributeError):
            DatetimeIndex.millisecond
        with self.assertRaises(AttributeError):
            DatetimeIndex([]).millisecond

    def test_difference_of_union(self):
        # GH 14323: Test taking the union of differences of an Index.
        # Difference of Period MUST preserve frequency, but the ability
        # to union results must be preserved.
        i = pd.period_range("20160920", "20160925", freq="D")

        a = pd.period_range("20160921", "20160924", freq="D")
        expected = pd.PeriodIndex(["20160920", "20160925"], freq='D')
        a_diff = i.difference(a)
        tm.assert_index_equal(a_diff, expected)
        tm.assert_attr_equal('freq', a_diff, expected)

        b = pd.period_range("20160922", "20160925", freq="D")
        b_diff = i.difference(b)
        expected = pd.PeriodIndex(["20160920", "20160921"], freq='D')
        tm.assert_index_equal(b_diff, expected)
        tm.assert_attr_equal('freq', b_diff, expected)

        union_of_diff = a_diff.union(b_diff)
        expected = pd.PeriodIndex(["20160920", "20160921", "20160925"],
                                  freq='D')
        tm.assert_index_equal(union_of_diff, expected)
        tm.assert_attr_equal('freq', union_of_diff, expected)
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
    """TimedeltaIndex-specific cases of the shared datetime-like tests."""
    _holder = TimedeltaIndex
    _multiprocess_can_split_ = True

    def setUp(self):
        self.indices = dict(index=tm.makeTimedeltaIndex(10))
        self.setup_indices()

    def create_index(self):
        # 0-4 days, each offset by one hour.
        return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)

    def test_construction_base_constructor(self):
        # pd.Index should infer TimedeltaIndex for timedelta data.
        arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]
        tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.TimedeltaIndex(np.array(arr)))

        arr = [np.nan, pd.NaT, pd.Timedelta('1 days')]
        tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.TimedeltaIndex(np.array(arr)))

    def test_shift(self):
        # GH 8083: shift by whole freq steps and by an explicit freq.
        drange = self.create_index()
        result = drange.shift(1)
        expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
                                   '3 days 01:00:00',
                                   '4 days 01:00:00', '5 days 01:00:00'],
                                  freq='D')
        self.assert_index_equal(result, expected)

        result = drange.shift(3, freq='2D 1s')
        expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
                                   '8 days 01:00:03', '9 days 01:00:03',
                                   '10 days 01:00:03'], freq='D')
        self.assert_index_equal(result, expected)

    def test_astype(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])

        result = idx.astype(object)
        expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
                         dtype=object)
        tm.assert_index_equal(result, expected)

        # NaT maps to the iNaT sentinel when cast to int.
        result = idx.astype(int)
        expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
                              dtype=np.int64)
        tm.assert_index_equal(result, expected)

        rng = timedelta_range('1 days', periods=10)
        result = rng.astype('i8')
        self.assert_index_equal(result, Index(rng.asi8))
        self.assert_numpy_array_equal(rng.asi8, result.values)

    def test_astype_timedelta64(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])

        result = idx.astype('timedelta64')
        expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
        tm.assert_index_equal(result, expected)

        # Casting to the same dtype copies unless copy=False.
        result = idx.astype('timedelta64[ns]')
        tm.assert_index_equal(result, idx)
        self.assertFalse(result is idx)

        result = idx.astype('timedelta64[ns]', copy=False)
        tm.assert_index_equal(result, idx)
        self.assertTrue(result is idx)

    def test_astype_raises(self):
        # GH 13149, GH 13209: unsupported casts must raise.
        idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
        self.assertRaises(ValueError, idx.astype, float)
        self.assertRaises(ValueError, idx.astype, str)
        self.assertRaises(ValueError, idx.astype, 'datetime64')
        self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')

    def test_get_loc(self):
        idx = pd.to_timedelta(['0 days', '1 days', '2 days'])

        for method in [None, 'pad', 'backfill', 'nearest']:
            self.assertEqual(idx.get_loc(idx[1], method), 1)
            self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
            self.assertEqual(idx.get_loc(str(idx[1]), method), 1)

        # Zero tolerance accepted in several equivalent representations.
        self.assertEqual(
            idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
        self.assertEqual(
            idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
        self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)

        with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
            idx.get_loc(idx[1], method='nearest', tolerance='foo')

        for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
            self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)

    def test_get_indexer(self):
        idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
        tm.assert_numpy_array_equal(idx.get_indexer(idx),
                                    np.array([0, 1, 2], dtype=np.intp))

        target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
                                    np.array([-1, 0, 1], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
                                    np.array([0, 1, 2], dtype=np.intp))
        tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
                                    np.array([0, 1, 1], dtype=np.intp))

        res = idx.get_indexer(target, 'nearest',
                              tolerance=pd.Timedelta('1 hour'))
        tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))

    def test_numeric_compat(self):
        # TimedeltaIndex supports numeric mul/div, unlike the other
        # datetime-like indexes (overrides the base-class test).
        idx = self._holder(np.arange(5, dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
        result = idx * 1
        tm.assert_index_equal(result, idx)

        result = 1 * idx
        tm.assert_index_equal(result, idx)

        result = idx / 1
        tm.assert_index_equal(result, idx)

        result = idx // 1
        tm.assert_index_equal(result, idx)

        result = idx * np.array(5, dtype='int64')
        tm.assert_index_equal(result,
                              self._holder(np.arange(5, dtype='int64') * 5))

        result = idx * np.arange(5, dtype='int64')
        tm.assert_index_equal(result, didx)

        result = idx * Series(np.arange(5, dtype='int64'))
        tm.assert_index_equal(result, didx)

        result = idx * Series(np.arange(5, dtype='float64') + 0.1)
        tm.assert_index_equal(result, self._holder(np.arange(
            5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))

        # invalid: timedelta * timedelta, and length mismatches.
        self.assertRaises(TypeError, lambda: idx * idx)
        self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
        self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))

    def test_pickle_compat_construction(self):
        # TimedeltaIndex can't be constructed without data; skip the
        # base-class pickle compatibility check.
        pass

    def test_ufunc_coercions(self):
        # normal ops are also tested in tseries/test_timedeltas.py
        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')

        for result in [idx * 2, np.multiply(idx, 2)]:
            tm.assertIsInstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
                                 freq='4H', name='x')
            tm.assert_index_equal(result, exp)
            self.assertEqual(result.freq, '4H')

        for result in [idx / 2, np.divide(idx, 2)]:
            tm.assertIsInstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
                                 freq='H', name='x')
            tm.assert_index_equal(result, exp)
            self.assertEqual(result.freq, 'H')

        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
                             freq='2H', name='x')
        for result in [-idx, np.negative(idx)]:
            tm.assertIsInstance(result, TimedeltaIndex)
            exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
                                 freq='-2H', name='x')
            tm.assert_index_equal(result, exp)
            self.assertEqual(result.freq, '-2H')

        idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
                             freq='H', name='x')
        for result in [abs(idx), np.absolute(idx)]:
            tm.assertIsInstance(result, TimedeltaIndex)
            # abs() is not monotonic here, so freq cannot be preserved.
            exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
                                 freq=None, name='x')
            tm.assert_index_equal(result, exp)
            self.assertEqual(result.freq, None)

    def test_fillna_timedelta(self):
        # GH 11343
        idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])

        exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
        self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)

        exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
        # BUGFIX: this fill result was previously computed but never
        # asserted against 'exp'.
        self.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)

        # Non-timedelta fill value coerces to object Index.
        exp = pd.Index(
            [pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
        self.assert_index_equal(idx.fillna('x'), exp)

    def test_difference_of_union(self):
        # GH 14323: Test taking the union of differences of an Index.
        # Difference of TimedeltaIndex does not preserve frequency,
        # so a differencing operation should not retain the freq field of the
        # original index.
        i = pd.timedelta_range("0 days", "5 days", freq="D")

        a = pd.timedelta_range("1 days", "4 days", freq="D")
        expected = pd.TimedeltaIndex(["0 days", "5 days"], freq=None)
        a_diff = i.difference(a)
        tm.assert_index_equal(a_diff, expected)
        tm.assert_attr_equal('freq', a_diff, expected)

        b = pd.timedelta_range("2 days", "5 days", freq="D")
        b_diff = i.difference(b)
        expected = pd.TimedeltaIndex(["0 days", "1 days"], freq=None)
        tm.assert_index_equal(b_diff, expected)
        tm.assert_attr_equal('freq', b_diff, expected)

        union_of_difference = a_diff.union(b_diff)
        expected = pd.TimedeltaIndex(["0 days", "1 days", "5 days"],
                                     freq=None)
        tm.assert_index_equal(union_of_difference, expected)
        tm.assert_attr_equal('freq', union_of_difference, expected)
| apache-2.0 |
shahankhatch/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm

# Grid over which the decision function is evaluated for plotting.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data: two Gaussian blobs centred at (2, 2) and (-2, -2).
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations from the same distribution.
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations (uniform noise).
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))

# Fit the model; nu bounds the fraction of training errors.
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# Predictions are +1 (inlier) / -1 (outlier); count the mislabellings.
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size

# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
           ["learned frontier", "training observations",
            "new regular observations", "new abnormal observations"],
           loc="upper left",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
    "error train: %d/200 ; errors novel regular: %d/40 ; "
    "errors novel abnormal: %d/40"
    % (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
bnaul/scikit-learn | sklearn/linear_model/tests/test_huber.py | 12 | 7600 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model._huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
    """Regression data where 10% of the samples are replaced by noise."""
    rng = np.random.RandomState(0)
    X, y = make_regression(
        n_samples=n_samples, n_features=n_features,
        random_state=0, noise=0.05)

    # Replace 10% of the sample with noise.
    num_noise = int(0.1 * n_samples)
    random_samples = rng.randint(0, n_samples, num_noise)
    X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
    return X, y
def test_huber_equals_lr_for_high_epsilon():
    # With a huge epsilon no sample is down-weighted as an outlier, so
    # the Huber fit collapses to ordinary least squares.
    X, y = make_regression_with_outliers()

    ols = LinearRegression()
    ols.fit(X, y)

    huber = HuberRegressor(epsilon=1e3, alpha=0.0)
    huber.fit(X, y)

    assert_almost_equal(huber.coef_, ols.coef_, 3)
    assert_almost_equal(huber.intercept_, ols.intercept_, 2)
def test_huber_max_iter():
    # When only one iteration is allowed, the solver must report that it
    # stopped exactly at max_iter.
    X, y = make_regression_with_outliers()
    model = HuberRegressor(max_iter=1)
    model.fit(X, y)
    assert model.n_iter_ == model.max_iter
def test_huber_gradient():
    # Test that the gradient calculated by _huber_loss_and_gradient is correct
    rng = np.random.RandomState(1)
    X, y = make_regression_with_outliers()
    sample_weight = rng.randint(1, 3, (y.shape[0]))

    def loss_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[0]

    def grad_func(x, *args):
        return _huber_loss_and_gradient(x, *args)[1]

    # Check using optimize.check_grad that the gradients are equal.
    for _ in range(5):
        # Check for both fit_intercept and otherwise.
        for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
            w = rng.randn(n_features)
            w[-1] = np.abs(w[-1])  # the trailing scale parameter must be > 0
            grad_same = optimize.check_grad(
                loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
            assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor()
    huber.fit(X, y)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_

    # Rescale coefs before comparing with assert_array_almost_equal to make
    # sure that the number of decimal places used is somewhat insensitive to
    # the amplitude of the coefficients and therefore to the scale of the
    # data and the regularization parameter
    scale = max(np.mean(np.abs(huber.coef_)),
                np.mean(np.abs(huber.intercept_)))

    # Unit sample weights must reproduce the unweighted fit.
    huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)

    # Duplicating rows must be equivalent to integer sample weights.
    X, y = make_regression_with_outliers(n_samples=5, n_features=20)
    X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
    y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
    huber.fit(X_new, y_new)
    huber_coef = huber.coef_
    huber_intercept = huber.intercept_
    sample_weight = np.ones(X.shape[0])
    sample_weight[1] = 3
    sample_weight[3] = 2
    huber.fit(X, y, sample_weight=sample_weight)

    assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
    assert_array_almost_equal(huber.intercept_ / scale,
                              huber_intercept / scale)

    # Test sparse implementation with sample weights.
    X_csr = sparse.csr_matrix(X)
    huber_sparse = HuberRegressor()
    huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
    assert_array_almost_equal(huber_sparse.coef_ / scale,
                              huber_coef / scale)
def test_huber_sparse():
    # Dense and CSR inputs must produce the same coefficients and the
    # same outlier mask.
    X, y = make_regression_with_outliers()

    dense_model = HuberRegressor(alpha=0.1)
    dense_model.fit(X, y)

    sparse_model = HuberRegressor(alpha=0.1)
    sparse_model.fit(sparse.csr_matrix(X), y)

    assert_array_almost_equal(sparse_model.coef_, dense_model.coef_)
    assert_array_equal(dense_model.outliers_, sparse_model.outliers_)
def test_huber_scaling_invariant():
    # Test that outliers filtering is scaling independent.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
    huber.fit(X, y)
    n_outliers_mask_1 = huber.outliers_
    assert not np.all(n_outliers_mask_1)

    # Scaling the target must not change which samples are flagged.
    huber.fit(X, 2. * y)
    n_outliers_mask_2 = huber.outliers_
    assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)

    # Nor does scaling both X and y together.
    huber.fit(2. * X, 2. * y)
    n_outliers_mask_3 = huber.outliers_
    assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
    # Test they should converge to same coefficients for same parameters
    X, y = make_regression_with_outliers(n_samples=10, n_features=2)

    # Fit once to find out the scale parameter. Scale down X and y by scale
    # so that the scale parameter is optimized to 1.0
    huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
                           epsilon=1.35)
    huber.fit(X, y)
    X_scale = X / huber.scale_
    y_scale = y / huber.scale_
    huber.fit(X_scale, y_scale)
    assert_almost_equal(huber.scale_, 1.0, 3)

    # SGD with the huber loss and no regularization should agree
    # (loosely - only to 1 decimal place).
    sgdreg = SGDRegressor(
        alpha=0.0, loss="huber", shuffle=True, random_state=0, max_iter=10000,
        fit_intercept=False, epsilon=1.35, tol=None)
    sgdreg.fit(X_scale, y_scale)
    assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
    # A warm re-fit on identical data should change nothing and converge
    # immediately.
    X, y = make_regression_with_outliers()
    huber_warm = HuberRegressor(
        alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
    huber_warm.fit(X, y)
    huber_warm_coef = huber_warm.coef_.copy()
    huber_warm.fit(X, y)

    # SciPy performs the tol check after doing the coef updates, so
    # these would be almost same but not equal.
    assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)

    assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
    # Test that huber returns a better r2 score than ridge on the
    # non-outlier samples.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(alpha=0.01)
    huber.fit(X, y)
    # Inliers are samples whose absolute residual is within
    # epsilon * scale_ of the fitted line.
    linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
    mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
    huber_score = huber.score(X[mask], y[mask])
    huber_outlier_score = huber.score(X[~mask], y[~mask])

    # The Ridge regressor should be influenced by the outliers and hence
    # give a worse score on the non-outliers as compared to the huber
    # regressor.
    ridge = Ridge(alpha=0.01)
    ridge.fit(X, y)
    ridge_score = ridge.score(X[mask], y[mask])
    ridge_outlier_score = ridge.score(X[~mask], y[~mask])
    assert huber_score > ridge_score

    # The huber model should also fit poorly on the outliers.
    assert ridge_outlier_score > huber_outlier_score
def test_huber_bool():
    # Fitting on a boolean feature matrix must not raise.
    X, y = make_regression(n_samples=200, n_features=2, noise=4.0,
                           random_state=0)
    boolean_features = X > 0
    HuberRegressor().fit(boolean_features, y)
| bsd-3-clause |
calum-chamberlain/EQcorrscan | eqcorrscan/utils/mag_calc.py | 1 | 49349 | """
Functions to aid magnitude estimation.
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
import numpy as np
import logging
import eqcorrscan # Used to get version number
import os
import glob
import matplotlib.pyplot as plt
import itertools
import copy
import random
import pickle
import math
from inspect import currentframe
from scipy.signal import iirfilter, sosfreqz
from collections import Counter
from obspy import Trace
from obspy.signal.invsim import simulate_seismometer as seis_sim
from obspy.core.event import (
Amplitude, Pick, WaveformStreamID, Origin, ResourceIdentifier)
from obspy.geodetics import degrees2kilometers
Logger = logging.getLogger(__name__)
# Magnitude - frequency funcs
def calc_max_curv(magnitudes, bin_size=0.5, plotvar=False):
    """
    Estimate the magnitude of completeness via the maximum-curvature method.

    Bins the magnitudes into a histogram, takes first and second finite
    differences and returns the bin (plus one bin-width) at which the
    absolute curvature of the magnitude-frequency distribution is largest.

    :type magnitudes: list or numpy array
    :param magnitudes:
        Magnitudes from which to compute the maximum curvature, giving an
        estimate of the magnitude of completeness under a power-law
        scaling assumption.
    :type bin_size: float
    :param bin_size:
        Width of magnitude bins used to compute the non-cumulative
        distribution.
    :type plotvar: bool
    :param plotvar: Turn plotting on and off.

    :rtype: float
    :return: Magnitude at maximum curvature.

    .. Note:: Should be used as a guide, often under-estimates Mc.

    .. rubric:: Example

    >>> import numpy as np
    >>> mags = np.arange(3, 6, .1)
    >>> N = 10 ** (5 - 1 * mags)
    >>> magnitudes = [0, 2, 3, 2.5, 2.2, 1.0]  # Some below completeness
    >>> for mag, n in zip(mags, N):
    ...     magnitudes.extend([mag for _ in range(int(n))])
    >>> calc_max_curv(magnitudes, plotvar=False)
    3.0
    """
    low_edge = int(min(magnitudes))
    high_edge = int(max(magnitudes) + 1)
    bin_edges = np.arange(low_edge, high_edge + bin_size, bin_size)
    counts, bin_edges = np.histogram(magnitudes, bin_edges)
    # First and second finite differences of the histogram.
    gradient = (counts[1:] - counts[:-1]) / bin_size
    curvature = (gradient[1:] - gradient[:-1]) / bin_size
    mc = bin_edges[np.argmax(np.abs(curvature))] + bin_size
    if plotvar:
        fig, ax = plt.subplots()
        ax.scatter(bin_edges[:-1] + bin_size / 2, counts, color="k",
                   label="Magnitudes")
        ax.axvline(x=mc, color="red", label="Maximum curvature")
        ax1 = ax.twinx()
        ax1.plot(bin_edges[:-1] + bin_size / 2,
                 np.cumsum(counts[::-1])[::-1],
                 color="k", label="Cumulative distribution")
        ax1.scatter(bin_edges[1:-1], gradient, color="r", label="Gradient")
        ax2 = ax.twinx()
        ax2.scatter(bin_edges[1:-2] + bin_size, curvature, color="blue",
                    label="Curvature")
        # Put the third y-axis on its own offset spine; see the matplotlib
        # "multiple yaxis with spines" gallery example.
        ax2.spines["right"].set_position(("axes", 1.2))
        ax2.set_frame_on(True)
        ax2.patch.set_visible(False)
        for sp in ax2.spines.values():
            sp.set_visible(False)
        ax2.spines["right"].set_visible(True)
        ax.set_ylabel("N earthquakes in bin")
        ax.set_xlabel("Magnitude")
        ax1.set_ylabel("Cumulative events and gradient")
        ax2.set_ylabel("Curvature")
        fig.legend()
        fig.show()
    return float(mc)
def calc_b_value(magnitudes, completeness, max_mag=None, plotvar=True):
    """
    Calculate the b-value for a range of completeness magnitudes.

    Calculates a power-law fit to given magnitudes for each completeness
    magnitude. Plots the b-values and residuals for the fitted catalogue
    against the completeness values. Computes fits using numpy.polyfit,
    which uses a least-squares technique.

    :type magnitudes: list
    :param magnitudes: Magnitudes to compute the b-value for.
    :type completeness: list
    :param completeness: list of completeness values to compute b-values for.
    :type max_mag: float
    :param max_mag: Maximum magnitude to attempt to fit in magnitudes.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off.

    :rtype: list
    :return: List of tuples of (completeness, b-value, residual,\
        number of magnitudes used)

    .. rubric:: Example

    >>> from obspy.clients.fdsn import Client
    >>> from obspy import UTCDateTime
    >>> from eqcorrscan.utils.mag_calc import calc_b_value
    >>> client = Client('IRIS')
    >>> t1 = UTCDateTime('2012-03-26T00:00:00')
    >>> t2 = t1 + (3 * 86400)
    >>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
    >>> magnitudes = [event.magnitudes[0].mag for event in catalog]
    >>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
    ...                         plotvar=False)
    >>> round(b_values[4][1])
    1.0
    >>> # We can set a maximum magnitude:
    >>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
    ...                         plotvar=False, max_mag=5)
    >>> round(b_values[4][1])
    1.0
    """
    b_values = []
    # Calculate the cdf for all magnitudes (descending magnitude order, so
    # cdf[i] counts events with magnitude >= mag_steps[i]).
    counts = Counter(magnitudes)
    cdf = np.zeros(len(counts))
    mag_steps = np.zeros(len(counts))
    for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
        mag_steps[i] = magnitude
        if i > 0:
            cdf[i] = cdf[i - 1] + counts[magnitude]
        else:
            cdf[i] = counts[magnitude]

    if not max_mag:
        max_mag = max(magnitudes)
    for m_c in completeness:
        if m_c >= max_mag or m_c >= max(magnitudes):
            Logger.warning('Not computing completeness at %s, above max_mag' %
                           str(m_c))
            break
        complete_mags = []
        complete_freq = []
        for i, mag in enumerate(mag_steps):
            # NOTE(review): the chained comparison 'mag >= m_c <= max_mag'
            # checks mag >= m_c and m_c <= max_mag; it never compares mag
            # against max_mag - confirm this is intended.
            if mag >= m_c <= max_mag:
                complete_mags.append(mag)
                complete_freq.append(np.log10(cdf[i]))
        if len(complete_mags) < 4:
            Logger.warning('Not computing completeness above ' + str(m_c) +
                           ', fewer than 4 events')
            break
        # Least-squares linear fit to log10(N) vs magnitude; slope is -b.
        fit = np.polyfit(complete_mags, complete_freq, 1, full=True)
        # Calculate the residuals according to the Wiemer & Wys 2000 definition
        predicted_freqs = [fit[0][1] - abs(fit[0][0] * M)
                           for M in complete_mags]
        r = 100 - ((np.sum([abs(complete_freq[i] - predicted_freqs[i])
                            for i in range(len(complete_freq))]) * 100) /
                   np.sum(complete_freq))
        b_values.append((m_c, abs(fit[0][0]), r, str(len(complete_mags))))
    if plotvar:
        fig, ax1 = plt.subplots()
        b_vals = ax1.scatter(list(zip(*b_values))[0], list(zip(*b_values))[1],
                             c='k')
        resid = ax1.scatter(list(zip(*b_values))[0],
                            [100 - b for b in list(zip(*b_values))[2]], c='r')
        ax1.set_ylabel('b-value and residual')
        plt.xlabel('Completeness magnitude')
        ax2 = ax1.twinx()
        ax2.set_ylabel('Number of events used in fit')
        n_ev = ax2.scatter(list(zip(*b_values))[0], list(zip(*b_values))[3],
                           c='g')
        fig.legend((b_vals, resid, n_ev),
                   ('b-values', 'residuals', 'number of events'),
                   'lower right')
        ax1.set_title('Possible completeness values')
        plt.show()
    return b_values
# Helpers for local magnitude estimation
# Poles-and-zeros response of a Wood-Anderson torsion seismometer, in the
# form expected by obspy's simulate_seismometer.
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990
PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
          'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
def dist_calc(loc1, loc2):
    """
    Calculate the distance in km between two points.

    Uses the
    `haversine formula <https://en.wikipedia.org/wiki/Haversine_formula>`_
    to calculate great circle distance at the Earth's surface, then uses
    trig to include depth.  The actual computation is done in a compiled
    C routine shipped with EQcorrscan.

    :type loc1: tuple
    :param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
    :type loc2: tuple
    :param loc2: Tuple of lat, lon, depth (in decimal degrees and km)

    :returns: Distance between points in km.
    :rtype: float
    """
    import ctypes

    from eqcorrscan.utils.libnames import _load_cdll

    # Bind the compiled helper and declare its C signature so that ctypes
    # marshals all six arguments as single-precision floats.
    clib = _load_cdll('libutils')
    clib.dist_calc.argtypes = [ctypes.c_float] * 6
    clib.dist_calc.restype = ctypes.c_float

    # Latitudes and longitudes go in as radians, depths stay in km.
    return clib.dist_calc(
        float(math.radians(loc1[0])), float(math.radians(loc1[1])),
        float(loc1[2]),
        float(math.radians(loc2[0])), float(math.radians(loc2[1])),
        float(loc2[2]))
def _sim_WA(trace, inventory, water_level, velocity=False):
    """
    Remove the instrument response from a trace and simulate a Wood-Anderson.

    Returns a de-meaned, de-trended, Wood Anderson simulated trace in
    its place.

    Works in-place on data and will destroy your original data, copy the
    trace before giving it to this function!

    :type trace: obspy.core.trace.Trace
    :param trace:
        A standard obspy trace, generally should be given without
        pre-filtering, if given with pre-filtering for use with
        amplitude determination for magnitudes you will need to
        worry about how you cope with the response of this filter
        yourself.
    :type inventory: obspy.core.inventory.Inventory
    :param inventory:
        Inventory containing response information for the stations in st.
    :type water_level: float
    :param water_level: Water level for the simulation.
    :type velocity: bool
    :param velocity:
        Whether to return a velocity trace or not - velocity is non-standard
        for Wood-Anderson instruments, but institutes that use seiscomp3 or
        Antelope require picks in velocity.

    :returns:
        Trace of Wood-Anderson simulated data, or None if no response
        information could be found for the trace.
    :rtype: :class:`obspy.core.trace.Trace`
    """
    assert isinstance(trace, Trace)
    # Work on a copy of the module-level paz dict because it may be edited.
    wood_anderson_paz = copy.deepcopy(PAZ_WA)
    if velocity:
        # An additional zero differentiates the response to velocity.
        wood_anderson_paz['zeros'] = [0 + 0j, 0 + 0j]
    trace.detrend('simple')
    # First deconvolve the instrument response to velocity.
    try:
        trace.remove_response(
            inventory=inventory, output="VEL", water_level=water_level)
    except Exception:
        Logger.error(f"No response for {trace.id} at {trace.stats.starttime}")
        return None
    # Then convolve with the Wood-Anderson response.
    trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
                          paz_remove=None, paz_simulate=wood_anderson_paz,
                          water_level=water_level)
    return trace
def _max_p2t(data, delta, return_peak_trough=False):
"""
Finds the maximum peak-to-trough amplitude and period.
Originally designed to be used to calculate magnitudes (by
taking half of the peak-to-trough amplitude as the peak amplitude).
:type data: numpy.ndarray
:param data: waveform trace to find the peak-to-trough in.
:type delta: float
:param delta: Sampling interval in seconds
:type return_peak_trough: bool
:param return_peak_trough:
Optionally return the peak and trough
:returns:
tuple of (amplitude, period, time) with amplitude in the same
scale as given in the input data, and period in seconds, and time in
seconds from the start of the data window.
:rtype: tuple
"""
turning_points = [] # A list of tuples of (amplitude, sample)
for i in range(1, len(data) - 1):
if (data[i] < data[i - 1] and data[i] < data[i + 1]) or\
(data[i] > data[i - 1] and data[i] > data[i + 1]):
turning_points.append((data[i], i))
if len(turning_points) >= 1:
amplitudes = np.empty([len(turning_points) - 1],)
half_periods = np.empty([len(turning_points) - 1],)
else:
Logger.warning(
'Turning points has length: ' + str(len(turning_points)) +
' data have length: ' + str(len(data)))
return 0.0, 0.0, 0.0
for i in range(1, len(turning_points)):
half_periods[i - 1] = (delta * (turning_points[i][1] -
turning_points[i - 1][1]))
amplitudes[i - 1] = np.abs(turning_points[i][0] -
turning_points[i - 1][0])
amplitude = np.max(amplitudes)
period = 2 * half_periods[np.argmax(amplitudes)]
delay = delta * turning_points[np.argmax(amplitudes)][1]
if not return_peak_trough:
return amplitude, period, delay
max_position = np.argmax(amplitudes)
peak = max(
t[0] for t in turning_points[max_position: max_position + 2])
trough = min(
t[0] for t in turning_points[max_position: max_position + 2])
return amplitude, period, delay, peak, trough
def _pairwise(iterable):
"""
Wrapper on itertools for SVD_magnitude.
"""
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
# Helpers for relative magnitude calculation
def _get_pick_for_station(event, station, use_s_picks):
"""
Get the first reported pick for a given station.
:type event: `obspy.core.event.Event`
:param event: Event with at least picks
:type station: str
:param station: Station to get pick for
:type use_s_picks: bool
:param use_s_picks: Whether to allow S-picks to be returned
:rtype: `obspy.core.event.Pick`
:return: First reported pick for station
"""
picks = [p for p in event.picks if p.waveform_id.station_code == station]
if len(picks) == 0:
Logger.info("No pick for {0}".format(station))
return None
picks.sort(key=lambda p: p.time)
for pick in picks:
if pick.phase_hint and pick.phase_hint[0].upper() == 'S'\
and not use_s_picks:
continue
return pick
Logger.info("No suitable pick found for {0}".format(station))
return None
def _snr(tr, noise_window, signal_window):
    """
    Compute ratio of maximum signal amplitude to rms noise amplitude.

    :param tr: Trace to compute signal-to-noise ratio for
    :param noise_window: (start, end) of window to use for noise
    :param signal_window: (start, end) of window to use for signal

    :return: Signal-to-noise ratio, np.nan if the signal window is empty.
    """
    from eqcorrscan.core.template_gen import _rms

    noise_start, noise_end = noise_window
    signal_start, signal_end = signal_window
    noise_amp = _rms(
        tr.slice(starttime=noise_start, endtime=noise_end).data)
    if np.isnan(noise_amp):
        # Fall back to unit noise so the ratio is still defined.
        Logger.warning("Could not calculate noise with this data, setting "
                       "to 1")
        noise_amp = 1.0
    try:
        signal_amp = tr.slice(
            starttime=signal_start, endtime=signal_end).data.max()
    except ValueError as e:
        # An empty signal slice raises ValueError on .max().
        Logger.error(e)
        return np.nan
    return signal_amp / noise_amp
def _get_signal_and_noise(stream, event, seed_id, noise_window,
                          signal_window, use_s_picks):
    """
    Get noise and signal amplitudes and signal standard deviation for an event
    on a specific channel.

    Noise amplitude is calculated as the RMS amplitude in the noise window,
    signal amplitude is the maximum amplitude in the signal window.  Windows
    are relative to the first usable pick for the station.

    :return:
        Tuple of (noise amplitude, signal amplitude, signal std), any of
        which may be None if it could not be measured.
    """
    from eqcorrscan.core.template_gen import _rms

    station = seed_id.split('.')[1]
    pick = _get_pick_for_station(
        event=event, station=station, use_s_picks=use_s_picks)
    if pick is None:
        Logger.error("No pick for {0}".format(station))
        return None, None, None
    matched = stream.select(id=seed_id).merge()
    if len(matched) == 0:
        return None, None, None
    trace = matched[0]
    # RMS amplitude over the noise window; None when no data was available.
    noise_amp = _rms(trace.slice(
        starttime=pick.time + noise_window[0],
        endtime=pick.time + noise_window[1]).data)
    if np.isnan(noise_amp):
        noise_amp = None
    signal = trace.slice(
        starttime=pick.time + signal_window[0],
        endtime=pick.time + signal_window[1]).data
    if len(signal) == 0:
        Logger.debug("No signal data between {0} and {1}".format(
            pick.time + signal_window[0], pick.time + signal_window[1]))
        Logger.debug(trace)
        return noise_amp, None, None
    return noise_amp, signal.max(), signal.std()
def relative_amplitude(st1, st2, event1, event2, noise_window=(-20, -1),
                       signal_window=(-.5, 20), min_snr=5.0,
                       use_s_picks=False):
    """
    Compute the relative amplitudes between two streams.

    Uses standard deviation of amplitudes within trace. Relative amplitudes are
    computed as:

    .. math::

       \\frac{std(tr2)}{std(tr1)}

    where tr1 is a trace from st1 and tr2 is a matching (seed ids match) trace
    from st2. The standard deviation of the amplitudes is computed in the
    signal window given. If the ratio of amplitudes between the signal window
    and the noise window is below `min_snr` then no result is returned for that
    trace. Windows are computed relative to the first pick for that station.

    If one stream has insufficient data to estimate noise amplitude, the noise
    amplitude of the other will be used.

    :type st1: `obspy.core.stream.Stream`
    :param st1: Stream for event1
    :type st2: `obspy.core.stream.Stream`
    :param st2: Stream for event2
    :type event1: `obspy.core.event.Event`
    :param event1: Event with picks (nothing else is needed)
    :type event2: `obspy.core.event.Event`
    :param event2: Event with picks (nothing else is needed)
    :type noise_window: tuple of float
    :param noise_window:
        Start and end of noise window in seconds relative to pick
    :type signal_window: tuple of float
    :param signal_window:
        Start and end of signal window in seconds relative to pick
    :type min_snr: float
    :param min_snr: Minimum signal-to-noise ratio allowed to make a measurement
    :type use_s_picks: bool
    :param use_s_picks:
        Whether to allow relative amplitude estimates to be made from S-picks.
        Note that noise and signal windows are relative to pick-times, so using
        an S-pick might result in a noise window including P-energy.

    :rtype: dict
    :return: Dictionary of relative amplitudes keyed by seed-id
    """
    # Only channels present in both streams can be compared.
    seed_ids = {tr.id for tr in st1}.intersection({tr.id for tr in st2})
    amplitudes = {}
    for seed_id in seed_ids:
        # Measure noise, peak signal and signal std on this channel for
        # each of the two events.
        noise1, signal1, std1 = _get_signal_and_noise(
            stream=st1, event=event1, signal_window=signal_window,
            noise_window=noise_window, use_s_picks=use_s_picks,
            seed_id=seed_id)
        noise2, signal2, std2 = _get_signal_and_noise(
            stream=st2, event=event2, signal_window=signal_window,
            noise_window=noise_window, use_s_picks=use_s_picks,
            seed_id=seed_id)
        # Fall back to the other event's noise estimate when one is missing.
        # NOTE(review): `or` also replaces a legitimate 0.0 noise amplitude,
        # not only None - confirm that is intended.
        noise1 = noise1 or noise2
        noise2 = noise2 or noise1
        if noise1 is None or noise2 is None:
            Logger.info("Insufficient data for noise to be estimated for "
                        "{0}".format(seed_id))
            continue
        if signal1 is None or signal2 is None:
            Logger.info("No signal data found for {0}".format(seed_id))
            continue
        # nan_to_num guards against nan propagating from the division.
        snr1 = np.nan_to_num(signal1 / noise1)
        snr2 = np.nan_to_num(signal2 / noise2)
        if snr1 < min_snr or snr2 < min_snr:
            Logger.info("SNR (event1: {0:.2f}, event2: {1:.2f} too low "
                        "for {2}".format(snr1, snr2, seed_id))
            continue
        # Relative amplitude is the ratio of signal standard deviations.
        ratio = std2 / std1
        Logger.debug("Channel: {0} Relative amplitude: {1:.2f}".format(
            seed_id, ratio))
        amplitudes.update({seed_id: ratio})
    return amplitudes
# Magnitude estimation functions
def relative_magnitude(st1, st2, event1, event2, noise_window=(-20, -1),
                       signal_window=(-.5, 20), min_snr=5.0, min_cc=0.7,
                       use_s_picks=False, correlations=None, shift=.2,
                       return_correlations=False, weight_by_correlation=True):
    """
    Compute the relative magnitudes between two events.

    See :func:`eqcorrscan.utils.mag_calc.relative_amplitude` for information
    on how relative amplitudes are calculated. To compute relative magnitudes
    from relative amplitudes this function weights the amplitude ratios by
    the cross-correlation of the two events. The relation used is similar to
    Schaff and Richards (2014) and is:

    .. math::

       \\Delta m = \\log{\\frac{std(tr2)}{std(tr1)}} \\times CC

    :type st1: `obspy.core.stream.Stream`
    :param st1: Stream for event1
    :type st2: `obspy.core.stream.Stream`
    :param st2: Stream for event2
    :type event1: `obspy.core.event.Event`
    :param event1: Event with picks (nothing else is needed)
    :type event2: `obspy.core.event.Event`
    :param event2: Event with picks (nothing else is needed)
    :type noise_window: tuple of float
    :param noise_window:
        Start and end of noise window in seconds relative to pick
    :type signal_window: tuple of float
    :param signal_window:
        Start and end of signal window in seconds relative to pick
    :type min_snr: float
    :param min_snr: Minimum signal-to-noise ratio allowed to make a measurement
    :type min_cc: float
    :param min_cc:
        Minimum inter-event correlation (between -1 and 1) allowed to make a
        measurement.
    :type use_s_picks: bool
    :param use_s_picks:
        Whether to allow relative amplitude estimates to be made from S-picks.
        Note that noise and signal windows are relative to pick-times, so using
        an S-pick might result in a noise window including P-energy.
    :type correlations: dict
    :param correlations:
        Pre-computed dictionary of correlations keyed by seed-id. If None
        (default) then correlations will be computed for the provided data in
        the `signal_window`.
    :type shift: float
    :param shift:
        Shift length for correlations in seconds - maximum correlation within
        a window between +/- shift of the P-pick will be used to weight the
        magnitude.
    :type return_correlations: bool
    :param return_correlations:
        If true will also return maximum correlations as a dictionary.
    :type weight_by_correlation: bool
    :param weight_by_correlation:
        Whether to weight the magnitude by the correlation or not.

    :rtype: dict
    :return: Dictionary of relative magnitudes keyed by seed-id
    """
    # NOTE(review): math is already imported at module level; this local
    # import is redundant but harmless.
    import math
    from obspy.signal.cross_correlation import correlate
    relative_magnitudes = {}
    compute_correlations = False
    if correlations is None:
        correlations = {}
        compute_correlations = True
    # Channel-by-channel amplitude ratios, already SNR-screened.
    relative_amplitudes = relative_amplitude(
        st1=st1, st2=st2, event1=event1, event2=event2,
        noise_window=noise_window, signal_window=signal_window,
        min_snr=min_snr, use_s_picks=use_s_picks)
    for seed_id, amplitude_ratio in relative_amplitudes.items():
        tr1 = st1.select(id=seed_id)[0]
        tr2 = st2.select(id=seed_id)[0]
        pick1 = _get_pick_for_station(
            event=event1, station=tr1.stats.station, use_s_picks=use_s_picks)
        pick2 = _get_pick_for_station(
            event=event2, station=tr2.stats.station, use_s_picks=use_s_picks)
        if weight_by_correlation:
            if compute_correlations:
                # Maximum correlation within +/- shift seconds of the picks.
                cc = correlate(
                    tr1.slice(
                        starttime=pick1.time + signal_window[0],
                        endtime=pick1.time + signal_window[1]),
                    tr2.slice(
                        starttime=pick2.time + signal_window[0],
                        endtime=pick2.time + signal_window[1]),
                    shift=int(shift * tr1.stats.sampling_rate))
                cc = cc.max()
                correlations.update({seed_id: cc})
            else:
                # Channels missing from the supplied correlations get 0.0
                # and are rejected by the min_cc check below.
                cc = correlations.get(seed_id, 0.0)
            if cc < min_cc:
                continue
        else:
            cc = 1.0
        # Weight and add to relative_magnitudes
        rel_mag = math.log10(amplitude_ratio) * cc
        Logger.debug("Channel: {0} Magnitude change {1:.2f}".format(
            tr1.id, rel_mag))
        relative_magnitudes.update({seed_id: rel_mag})
    if return_correlations:
        return relative_magnitudes, correlations
    return relative_magnitudes
def amp_pick_event(event, st, inventory, chans=('Z',), var_wintype=True,
                   winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0,
                   highcut=20.0, corners=4, min_snr=1.0, plot=False,
                   remove_old=False, ps_multiplier=0.34, velocity=False,
                   water_level=0, iaspei_standard=False):
    """
    Pick amplitudes for local magnitude for a single event.

    Looks for maximum peak-to-trough amplitude for a channel in a stream, and
    picks this amplitude and period. There are a few things it does
    internally to stabilise the result:

        1. Applies a given filter to the data using obspy's bandpass filter.
           The filter applied is a time-domain digital SOS filter.
           This is often necessary for small magnitude earthquakes. To correct
           for this filter later the gain of the filter at the period of the
           maximum amplitude is retrieved using scipy's sosfreqz, and used to
           divide the resulting picked amplitude.

        2. Picks the peak-to-trough amplitude, but records half of this to
           cope with possible DC offsets.

        3. The maximum amplitude within the given window is picked. Care must
           be taken to avoid including surface waves in the window;

        4. A variable window-length is used by default that takes into account
           P-S times if available, this is in an effort to include only the
           body waves. When P-S times are not available the ps_multiplier
           variable is used, which defaults to 0.34 x hypocentral distance.

    :type event: obspy.core.event.event.Event
    :param event: Event to pick
    :type st: obspy.core.stream.Stream
    :param st: Stream associated with event
    :type inventory: obspy.core.inventory.Inventory
    :param inventory:
        Inventory containing response information for the stations in st.
    :type chans: tuple
    :param chans:
        Tuple of the components to pick on, e.g. (Z, 1, 2, N, E)
    :type var_wintype: bool
    :param var_wintype:
        If True, the winlen will be multiplied by the P-S time if both P and
        S picks are available, otherwise it will be multiplied by the
        hypocentral distance*ps_multiplier, defaults to True
    :type winlen: float
    :param winlen:
        Length of window, see above parameter, if var_wintype is False then
        this will be in seconds, otherwise it is the multiplier to the
        p-s time, defaults to 0.9.
    :type pre_pick: float
    :param pre_pick:
        Time before the s-pick to start the cut window, defaults to 0.2.
    :type pre_filt: bool
    :param pre_filt: To apply a pre-filter or not, defaults to True
    :type lowcut: float
    :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0
    :type highcut: float
    :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0
    :type corners: int
    :param corners: Number of corners to use in the pre-filter
    :type min_snr: float
    :param min_snr:
        Minimum signal-to-noise ratio to allow a pick - see note below on
        signal-to-noise ratio calculation.
    :type plot: bool
    :param plot: Turn plotting on or off.
    :type remove_old: bool
    :param remove_old:
        If True, will remove old amplitudes and associated picks from event
        and overwrite with new picks. Defaults to False.
    :type ps_multiplier: float
    :param ps_multiplier:
        A p-s time multiplier of hypocentral distance - defaults to 0.34,
        based on p-s ratio of 1.68 and an S-velocity 0f 1.5km/s, deliberately
        chosen to be quite slow.
    :type velocity: bool
    :param velocity:
        Whether to make the pick in velocity space or not. Original definition
        of local magnitude used displacement of Wood-Anderson, MLv in seiscomp
        and Antelope uses a velocity measurement. *velocity and iaspei_standard
        are mutually exclusive*.
    :type water_level: float
    :param water_level:
        Water-level for seismometer simulation, see
        https://docs.obspy.org/packages/autogen/obspy.core.trace.Trace.remove_response.html
    :type iaspei_standard: bool
    :param iaspei_standard:
        Whether to output amplitude in IASPEI standard IAML (wood-anderson
        static amplification of 1), or AML with wood-anderson static
        amplification of 2080. Note: Units are SI (and specified in the
        amplitude)

    :returns: Picked event
    :rtype: :class:`obspy.core.event.Event`

    .. Note::
        Signal-to-noise ratio is calculated using the filtered data by
        dividing the maximum amplitude in the signal window (pick window)
        by the normalized noise amplitude (taken from the whole window
        supplied).

    .. Note::
        With `iaspei_standard=False`, picks will be returned in SI units
        (m or m/s), with the standard Wood-Anderson sensitivity of 2080 applied
        such that the measurements reflect the amplitude measured on a Wood
        Anderson instrument, as per the original local magnitude definitions
        of Richter and others.
    """
    # Velocity measurements are not defined for the IASPEI IAML standard.
    if iaspei_standard and velocity:
        raise NotImplementedError("Velocity is not IASPEI standard for IAML.")
    try:
        event_origin = event.preferred_origin() or event.origins[0]
    except IndexError:
        event_origin = Origin()
    # Depth is divided by 1000 below, i.e. assumed to be in metres.
    depth = event_origin.depth
    if depth is None:
        Logger.warning("No depth for the event, setting to 0 km")
        depth = 0
    # Remove amplitudes and picks for those amplitudes - this is not always
    # safe: picks may not be exclusively linked to amplitudes - hence the
    # default is *not* to do this.
    if remove_old and event.amplitudes:
        removal_ids = {amp.pick_id for amp in event.amplitudes}
        event.picks = [
            p for p in event.picks if p.resource_id not in removal_ids]
        event.amplitudes = []
    # We just want to look at P and S picks.
    picks = [p for p in event.picks
             if p.phase_hint and p.phase_hint[0].upper() in ("P", "S")]
    if len(picks) == 0:
        Logger.warning('No P or S picks found')
        return event
    st = st.copy().merge()  # merge the data, just in case! Work on a copy.
    # For each station cut the window
    for sta in {p.waveform_id.station_code for p in picks}:
        for chan in chans:
            Logger.info(f'Working on {sta} {chan}')
            tr = st.select(station=sta, component=chan)
            if not tr:
                Logger.warning(f'{sta} {chan} not found in the stream.')
                continue
            tr = tr.merge()[0]
            # Apply the pre-filter
            if pre_filt:
                tr = tr.split().detrend('simple').merge(fill_value=0)[0]
                tr.filter('bandpass', freqmin=lowcut, freqmax=highcut,
                          corners=corners)
            tr = _sim_WA(tr, inventory, water_level=water_level,
                         velocity=velocity)
            if tr is None:  # None returned when no matching response is found
                continue
            # Get the distance from an appropriate arrival
            sta_picks = [p for p in picks if p.waveform_id.station_code == sta]
            distances = []
            for pick in sta_picks:
                distances += [
                    a.distance for a in event_origin.arrivals
                    if a.pick_id == pick.resource_id and
                    a.distance is not None]
            if len(distances) == 0:
                Logger.error(f"Arrivals for station: {sta} do not contain "
                             "distances. Have you located this event?")
                hypo_dist = None
            else:
                # They should all be the same, but take the mean to be sure...
                # Epicentral distance (degrees -> km) combined with depth
                # (m -> km) gives the hypocentral distance.
                distance = sum(distances) / len(distances)
                hypo_dist = np.sqrt(
                    np.square(degrees2kilometers(distance)) +
                    np.square(depth / 1000))
            # Get the earliest P and S picks on this station
            phase_picks = {"P": None, "S": None}
            for _hint in phase_picks.keys():
                _picks = sorted(
                    [p for p in sta_picks if p.phase_hint[0].upper() == _hint],
                    key=lambda p: p.time)
                if len(_picks) > 0:
                    phase_picks[_hint] = _picks[0]
            p_pick = phase_picks["P"]
            s_pick = phase_picks["S"]
            # Get the window size.
            if var_wintype:
                # Missing P or S times are predicted from the hypocentral
                # distance via ps_multiplier.
                if p_pick and s_pick:
                    p_time, s_time = p_pick.time, s_pick.time
                elif s_pick and hypo_dist:
                    s_time = s_pick.time
                    p_time = s_time - (hypo_dist * ps_multiplier)
                elif p_pick and hypo_dist:
                    p_time = p_pick.time
                    s_time = p_time + (hypo_dist * ps_multiplier)
                elif (s_pick or p_pick) and hypo_dist is None:
                    Logger.error(
                        "No hypocentral distance and no matching P and S "
                        f"picks for {sta}, skipping.")
                    continue
                else:
                    raise NotImplementedError(
                        "No p or s picks - you should not have been able to "
                        "get here")
                trim_start = s_time - pre_pick
                trim_end = s_time + (s_time - p_time) * winlen
                # Work out the window length based on p-s time or distance
            else:  # Fixed window-length
                if s_pick:
                    s_time = s_pick.time
                elif p_pick and hypo_dist:
                    # In this case, there is no S-pick and the window length is
                    # fixed we need to calculate an expected S_pick based on
                    # the hypocentral distance, this will be quite hand-wavey
                    # as we are not using any kind of velocity model.
                    s_time = p_pick.time + hypo_dist * ps_multiplier
                else:
                    Logger.warning(
                        "No s-pick or hypocentral distance to predict "
                        f"s-arrival for station {sta}, skipping")
                    continue
                trim_start = s_time - pre_pick
                trim_end = s_time + winlen
            tr = tr.trim(trim_start, trim_end)
            if len(tr.data) <= 10:
                Logger.warning(f'Insufficient data for {sta}')
                continue
            # Get the amplitude
            try:
                amplitude, period, delay, peak, trough = _max_p2t(
                    tr.data, tr.stats.delta, return_peak_trough=True)
            except ValueError as e:
                Logger.error(e)
                Logger.error(f'No amplitude picked for tr {tr.id}')
                continue
            # Calculate the normalized noise amplitude
            # (RMS of the whole trimmed window, not a separate noise window).
            snr = amplitude / np.sqrt(np.mean(np.square(tr.data)))
            # A zero amplitude cannot be turned into a magnitude.
            if amplitude == 0.0:
                continue
            if snr < min_snr:
                Logger.info(
                    f'Signal to noise ratio of {snr} is below threshold.')
                continue
            if plot:
                plt.plot(np.arange(len(tr.data)), tr.data, 'k')
                plt.scatter(tr.stats.sampling_rate * delay, peak)
                plt.scatter(tr.stats.sampling_rate * (delay + period / 2),
                            trough)
                plt.show()
            Logger.info(f'Amplitude picked: {amplitude}')
            Logger.info(f'Signal-to-noise ratio is: {snr}')
            # Note, amplitude should be in meters at the moment!
            # Remove the pre-filter response
            if pre_filt:
                # Generate poles and zeros for the filter we used earlier.
                # We need to get the gain for the digital SOS filter used by
                # obspy.
                sos = iirfilter(
                    corners, [lowcut / (0.5 * tr.stats.sampling_rate),
                              highcut / (0.5 * tr.stats.sampling_rate)],
                    btype='band', ftype='butter', output='sos')
                _, gain = sosfreqz(sos, worN=[1 / period],
                                   fs=tr.stats.sampling_rate)
                gain = np.abs(gain[0])  # Convert from complex to real.
                if gain < 1e-2:
                    Logger.warning(
                        f"Pick made outside stable pass-band of filter "
                        f"on {tr.id}, rejecting")
                    continue
                amplitude /= gain
                Logger.debug(f"Removed filter gain: {gain}")
            # Write out the half amplitude, approximately the peak amplitude as
            # used directly in magnitude calculations
            amplitude *= 0.5
            # Documentation standards
            module = _sim_WA.__module__
            fname = currentframe().f_code.co_name
            # This is here to ensure that if the function name changes this
            # is still correct
            method_id = ResourceIdentifier(
                id=f"{module}.{fname}",
                prefix=f"smi:eqcorrscan{eqcorrscan.__version__}")
            filter_id = ResourceIdentifier(
                id=f"{module}._sim_WA",
                prefix=f"smi:eqcorrscan{eqcorrscan.__version__}")
            if iaspei_standard:
                # Remove wood-anderson amplification
                units, phase_hint, amplitude_type = (
                    "m", "IAML", "IAML")
                # amplitude *= 10 ** 9  # *THIS IS NOT SUPPORTED BY QML*
                amplitude /= PAZ_WA["sensitivity"]  # Remove WA sensitivity
                # Set the filter ID to state that sensitivity was removed
                filter_id = ResourceIdentifier(
                    id=f"{module}._sim_WA.WA_sensitivity_removed",
                    prefix=f"smi:eqcorrscan{eqcorrscan.__version__}")
            else:  # Not IAML, use SI units.
                if velocity:
                    units, phase_hint, amplitude_type = (
                        "m/s", "AML", "AML")
                else:
                    units, phase_hint, amplitude_type = (
                        "m", "AML", "AML")
            if tr.stats.channel.endswith("Z"):
                magnitude_hint = "MLv"
                # MLv is ML picked on the vertical channel
            else:
                magnitude_hint = "ML"
            # Append an amplitude reading to the event
            _waveform_id = WaveformStreamID(
                station_code=tr.stats.station, channel_code=tr.stats.channel,
                network_code=tr.stats.network)
            pick = Pick(
                waveform_id=_waveform_id, phase_hint=phase_hint,
                polarity='undecidable', time=tr.stats.starttime + delay,
                evaluation_mode='automatic',
                method_id=method_id, filter_id=filter_id)
            event.picks.append(pick)
            event.amplitudes.append(Amplitude(
                generic_amplitude=amplitude, period=period,
                pick_id=pick.resource_id, waveform_id=pick.waveform_id,
                unit=units, magnitude_hint=magnitude_hint,
                type=amplitude_type, category='point', method_id=method_id,
                filter_id=filter_id))
    return event
def svd_moments(u, s, v, stachans, event_list, n_svs=2):
    """
    Calculate relative moments/amplitudes using singular-value decomposition.

    Convert basis vectors calculated by singular value decomposition (see the
    SVD functions in clustering) into relative moments.

    For more information see the paper by
    `Rubinstein & Ellsworth (2010).
    <http://www.bssaonline.org/content/100/5A/1952.short>`_

    :type u: list
    :param u:
        List of the :class:`numpy.ndarray` input basis vectors from the SVD,
        one array for each channel used.
    :type s: list
    :param s:
        List of the :class:`numpy.ndarray` of singular values, one array for
        each channel.
    :type v: list
    :param v:
        List of :class:`numpy.ndarray` of output basis vectors from SVD, one
        array per channel.
    :type stachans: list
    :param stachans: List of station.channel input
    :type event_list: list
    :param event_list:
        List of events for which you have data, such that event_list[i]
        corresponds to stachans[i], U[i] etc. and event_list[i][j] corresponds
        to event j in U[i]. These are a series of indexes that map the basis
        vectors to their relative events and channels - if you have every
        channel for every event generating these is trivial (see example).
    :type n_svs: int
    :param n_svs: Number of singular values to use, defaults to 2.

    :returns: M, array of relative moments
    :rtype: :class:`numpy.ndarray`
    :returns: events_out, list of events that relate to M (in order), \
        does not include the magnitude information in the events, see note.
    :rtype: :class:`obspy.core.event.event.Event`

    .. note:: M is an array of relative moments (or amplitudes), these cannot
        be directly compared to true moments without calibration.

    .. note:: When comparing this method with the method used for creation
        of subspace detectors (Harris 2006) it is important to note that the
        input `design set` matrix in Harris contains waveforms as columns,
        whereas in Rubinstein & Ellsworth it contains waveforms as rows
        (i.e. the transpose of the Harris data matrix). The U and V matrices
        are therefore swapped between the two approaches. This is accounted
        for in EQcorrscan but may lead to confusion when reviewing the code.
        Here we use the Harris approach.

    .. rubric:: Example

    >>> from eqcorrscan.utils.mag_calc import svd_moments
    >>> from obspy import read
    >>> import glob
    >>> import os
    >>> from eqcorrscan.utils.clustering import svd
    >>> import numpy as np
    >>> # Do the set-up
    >>> testing_path = 'eqcorrscan/tests/test_data/similar_events_processed'
    >>> stream_files = glob.glob(os.path.join(testing_path, '*'))
    >>> stream_list = [read(stream_file) for stream_file in stream_files]
    >>> event_list = []
    >>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')]
    >>> for i, stream in enumerate(stream_list):
    ...     st_list = []
    ...     for tr in stream:
    ...         if (tr.stats.station, tr.stats.channel) not in remove_list:
    ...             stream.remove(tr)
    ...             continue
    ...         st_list.append(i)
    ...     event_list.append(st_list) # doctest: +SKIP
    >>> event_list = np.asarray(event_list).T.tolist()
    >>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP
    ['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1']
    >>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans,
    ...                             event_list=event_list) # doctest: +SKIP
    """
    Logger.critical(
        "Proceed with caution: this function is experimental and somewhat"
        " stochastic - you should run this multiple times to ensure you get"
        " a stable result.")
    # Define maximum number of events, will be the width of K
    K_width = max([max(ev_list) for ev_list in event_list]) + 1
    # Sometimes the randomisation generates a singular matrix - rather than
    # attempting to regulerize this matrix I propose undertaking the
    # randomisation step a further time
    if len(stachans) == 1:
        Logger.critical('Only provided data from one station-channel - '
                        'will not try to invert')
        return u[0][:, 0], event_list[0]
    for i, stachan in enumerate(stachans):
        k = []  # Small kernel matrix for one station - channel
        # Copy the relevant vectors so as not to destroy them
        # Here we'll swap into the Rubinstein U and V matrices
        U_working = copy.deepcopy(v[i].T)
        V_working = copy.deepcopy(u[i])
        s_working = copy.deepcopy(s[i].T)
        ev_list = event_list[i]
        if len(ev_list) > len(U_working):
            # Dump the offending matrix for post-mortem inspection.
            Logger.error('U is : ' + str(U_working.shape))
            Logger.error('ev_list is len %s' % str(len(ev_list)))
            f_dump = open('mag_calc_U_working.pkl', 'wb')
            pickle.dump(U_working, f_dump)
            f_dump.close()
            raise IOError('More events than represented in U')
        # Set all non-important singular values to zero
        s_working[n_svs:len(s_working)] = 0
        s_working = np.diag(s_working)
        # Convert to numpy matrices
        U_working = np.matrix(U_working)
        V_working = np.matrix(V_working)
        s_working = np.matrix(s_working)
        # First left singular vector carries the relative event weights.
        SVD_weights = U_working[:, 0]
        # If all the weights are negative take the abs
        if np.all(SVD_weights < 0):
            Logger.warning('All weights are negative - flipping them')
            SVD_weights = np.abs(SVD_weights)
        SVD_weights = np.array(SVD_weights).reshape(-1).tolist()
        # Shuffle the SVD_weights prior to pairing - will give one of multiple
        # pairwise options - see p1956 of Rubinstein & Ellsworth 2010
        # We need to keep the real indexes though, otherwise, if there are
        # multiple events with the same weight we will end up with multiple
        # -1 values
        random_SVD_weights = np.copy(SVD_weights)
        # Tack on the indexes
        random_SVD_weights = random_SVD_weights.tolist()
        random_SVD_weights = [(random_SVD_weights[_i], _i)
                              for _i in range(len(random_SVD_weights))]
        random.shuffle(random_SVD_weights)
        # Add the first element to the end so all elements will be paired twice
        random_SVD_weights.append(random_SVD_weights[0])
        # Take pairs of all the SVD_weights (each weight appears in 2 pairs)
        pairs = []
        for pair in _pairwise(random_SVD_weights):
            pairs.append(pair)
        # Deciding values for each place in kernel matrix using the pairs
        for pairsIndex in range(len(pairs)):
            # We will normalize by the minimum weight
            _weights = list(zip(*list(pairs[pairsIndex])))[0]
            _indeces = list(zip(*list(pairs[pairsIndex])))[1]
            min_weight = min(np.abs(_weights))
            max_weight = max(np.abs(_weights))
            min_index = _indeces[np.argmin(np.abs(_weights))]
            max_index = _indeces[np.argmax(np.abs(_weights))]
            row = []
            # Working out values for each row of kernel matrix
            for j in range(len(SVD_weights)):
                if j == max_index:
                    result = -1
                elif j == min_index:
                    normalised = max_weight / min_weight
                    result = float(normalised)
                else:
                    result = 0
                row.append(result)
            # Add each row to the K matrix
            k.append(row)
        # k is now a square matrix, we need to flesh it out to be K_width
        k_filled = np.zeros([len(k), K_width])
        for j in range(len(k)):
            for l, ev in enumerate(ev_list):
                k_filled[j, ev] = k[j][l]
        # Stack this station's kernel onto the full kernel matrix; the first
        # station initialises K.
        if 'K' not in locals():
            K = k_filled
        else:
            K = np.concatenate([K, k_filled])
    # Remove any empty rows
    K_nonempty = []
    events_out = []
    for i in range(0, K_width):
        if not np.all(K[:, i] == 0):
            K_nonempty.append(K[:, i])
            events_out.append(i)
    K = np.array(K_nonempty).T
    K = K.tolist()
    K_width = len(K[0])
    # Add an extra row to K, so average moment = 1
    K.append(np.ones(K_width) * (1. / K_width))
    Logger.debug("Created Kernel matrix: ")
    # NOTE(review): deletes the loop variable from the row-building loop
    # above; the comprehension below binds its own 'row' (Python 3 scoping).
    del row
    Logger.debug('\n'.join([''.join([str(round(float(item), 3)).ljust(6)
                                     for item in row]) for row in K]))
    # NOTE(review): Krounded is never used below - presumably left over from
    # debugging.
    Krounded = np.around(K, decimals=4)
    # Create a weighting matrix to put emphasis on the final row.
    W = np.matrix(np.identity(len(K)))
    # the final element of W = the number of stations*number of events
    W[-1, -1] = len(K) - 1
    # Make K into a matrix
    K = np.matrix(K)
    ############
    # Solve using the weighted least squares equation, K.T is K transpose
    Kinv = np.array(np.linalg.inv(K.T * W * K) * K.T * W)
    # M are the relative moments of the events
    M = Kinv[:, -1]
    # XXX TODO This still needs an outlier removal step
    return M, events_out
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| gpl-3.0 |
michaelbramwell/sms-tools | lectures/08-Sound-transformations/plots-code/stftMorph-orchestra.py | 18 | 2053 | import numpy as np
import time, os, sys
from scipy.signal import hamming, resample
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import dftModel as DFT
import utilFunctions as UF
import stftTransformations as STFTT
import stochasticModel as STOC
import math
import stft as STFT

# Morph the spectrum of an orchestra recording towards a male-speech
# recording, write the result to disk and plot the source, target and
# morphed magnitude spectrograms.
(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)   # analysis window for x1 (and for the output y)
N1 = 1024               # FFT size for x1
H1 = 256                # hop size
w2 = np.hamming(1024)   # analysis window for x2
N2 = 1024               # FFT size for x2
smoothf = .2            # spectral smoothing factor applied to x2
balancef = 0.5          # interpolation factor between the two spectra
y = STFTT.stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef)
mX2 = STOC.stochasticModelAnal(x2, H1, H1*2, smoothf)
mX, pX = STFT.stftAnal(x1, fs, w1, N1, H1)
mY, pY = STFT.stftAnal(y, fs, w1, N1, H1)
maxplotfreq = 10000.0   # only plot bins below this frequency (Hz)
plt.figure(1, figsize=(12, 9))

# magnitude spectrogram of the input sound
plt.subplot(311)
numFrames = int(mX[:, 0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
# Cast the bin count to int: float slice/arange bounds raise TypeError on
# Python 3 / recent numpy.  For a non-integral k, int(k) + 1 equals the
# number of values the old np.arange(k) produced, so the plots are unchanged.
numBins = int(N1*maxplotfreq/fs) + 1
binFreq = fs*np.arange(numBins)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:, :numBins]))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)

# magnitude spectrogram of the stochastic approximation of the target
plt.subplot(312)
numFrames = int(mX2[:, 0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
N = 2*mX2[0, :].size
numBins = int(N*maxplotfreq/fs) + 1
binFreq = fs*np.arange(numBins)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2[:, :numBins]))
plt.title('mX2 (speech-male.wav)')
plt.autoscale(tight=True)

# magnitude spectrogram of the morphed output
plt.subplot(313)
numFrames = int(mY[:, 0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
numBins = int(N1*maxplotfreq/fs) + 1
binFreq = fs*np.arange(numBins)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:, :numBins]))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-speech-stftMorph.wav')
plt.savefig('stftMorph-orchestra.png')
plt.show()
| agpl-3.0 |
Akshay0724/scikit-learn | sklearn/tests/test_multioutput.py | 23 | 12429 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
    """MultiOutputRegressor should match per-target GBMs fit independently."""
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]
    references = np.zeros_like(y_test)
    # reference: one single-output regressor fit per target column
    for n in range(3):
        rgr = GradientBoostingRegressor(random_state=0)
        rgr.fit(X_train, y_train[:, n])
        references[:, n] = rgr.predict(X_test)
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X_train, y_train)
    y_pred = rgr.predict(X_test)
    assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
    """Incremental MultiOutputRegressor should match per-target SGD."""
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]
    references = np.zeros_like(y_test)
    half_index = 25
    # reference: two partial_fit calls per individual target column
    for n in range(3):
        sgr = SGDRegressor(random_state=0)
        sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
        sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
        references[:, n] = sgr.predict(X_test)
    sgr = MultiOutputRegressor(SGDRegressor(random_state=0))
    sgr.partial_fit(X_train[:half_index], y_train[:half_index])
    sgr.partial_fit(X_train[half_index:], y_train[half_index:])
    y_pred = sgr.predict(X_test)
    assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
    """MultiOutputRegressor must reject y with a single target column."""
    # Test multi target regression raises
    X, y = datasets.make_regression(n_targets=1)
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
    """Sparse and dense X should give identical multi-target predictions."""
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test = X[50:]
    # every common scipy sparse format should be accepted transparently
    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
                   sp.lil_matrix]:
        rgr = MultiOutputRegressor(Lasso(random_state=0))
        rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
        rgr.fit(X_train, y_train)
        rgr_sparse.fit(sparse(X_train), y_train)
        assert_almost_equal(rgr.predict(X_test),
                            rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
    """Weights must be rejected iff the base estimator lacks support."""
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [0.8, 0.6]
    # Lasso.fit has no sample_weight parameter -> informative ValueError
    rgr = MultiOutputRegressor(Lasso())
    assert_raises_regex(ValueError, "does not support sample weights",
                        rgr.fit, X, y, w)
    # no exception should be raised if the base estimator supports weights
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
    """Different sample weights must lead to different partial_fit results."""
    # weighted regressor
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
    rgr_w.partial_fit(X, y, w)
    # weighted with different weights
    w = [2., 2.]
    rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
    rgr.partial_fit(X, y, w)
    assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
    """Integer sample weights must act like sample repetition."""
    # weighted regressor
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr_w.fit(Xw, yw, w)
    # unweighted, but with repeated samples
    X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y)
    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Module-level fixture shared by the classification tests below.
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
# per-output class labels, required by partial_fit's `classes` argument
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
    """partial_fit with n_jobs=-1 must not keep stale estimator objects."""
    sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
    mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
    mor.partial_fit(X, y, classes)
    est1 = mor.estimators_[0]
    mor.partial_fit(X, y)
    est2 = mor.estimators_[0]
    # parallelism requires this to be the case for a sane implementation
    assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
    """Incremental MultiOutputClassifier should match per-column SGD."""
    # test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict
    sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
    multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
    # train the multi_target_linear and also get the predictions.
    half_index = X.shape[0] // 2
    multi_target_linear.partial_fit(
        X[:half_index], y[:half_index], classes=classes)
    first_predictions = multi_target_linear.predict(X)
    assert_equal((n_samples, n_outputs), first_predictions.shape)
    multi_target_linear.partial_fit(X[half_index:], y[half_index:])
    second_predictions = multi_target_linear.predict(X)
    assert_equal((n_samples, n_outputs), second_predictions.shape)
    # train the linear classification with each column and assert that
    # predictions are equal after first partial_fit and second partial_fit
    for i in range(3):
        # create a clone with the same state
        sgd_linear_clf = clone(sgd_linear_clf)
        sgd_linear_clf.partial_fit(
            X[:half_index], y[:half_index, i], classes=classes[i])
        assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
        sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
        assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_multi_output_classification_partial_fit_no_first_classes_exception():
    """First partial_fit call without `classes` must raise ValueError.

    Renamed from the misspelled
    ``test_mutli_output_classifiation_partial_fit_no_first_classes_exception``
    so the name matches the naming of the rest of the module; pytest/nose
    discovery is unaffected (the ``test_`` prefix is preserved).
    """
    sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
    multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
    assert_raises_regex(ValueError, "classes must be passed on the first call "
                                    "to partial_fit.",
                        multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
    """Check fit, predict, predict_proba shapes and per-column consistency."""
    # test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
    forest = RandomForestClassifier(n_estimators=10, random_state=1)
    multi_target_forest = MultiOutputClassifier(forest)
    # train the multi_target_forest and also get the predictions.
    multi_target_forest.fit(X, y)
    predictions = multi_target_forest.predict(X)
    assert_equal((n_samples, n_outputs), predictions.shape)
    predict_proba = multi_target_forest.predict_proba(X)
    assert len(predict_proba) == n_outputs
    for class_probabilities in predict_proba:
        assert_equal((n_samples, n_classes), class_probabilities.shape)
    # argmax over the stacked probabilities must agree with predict
    assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
                       predictions)
    # train the forest with each column and assert that predictions are equal
    for i in range(3):
        forest_ = clone(forest)  # create a clone with the same state
        forest_.fit(X, y[:, i])
        assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
        assert_array_equal(list(forest_.predict_proba(X)),
                           list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
    """MultiOutputClassifier should also wrap a meta-estimator (OvR SVC)."""
    # test to check meta of meta estimators
    svc = LinearSVC(random_state=0)
    multi_class_svc = OneVsRestClassifier(svc)
    multi_target_svc = MultiOutputClassifier(multi_class_svc)
    multi_target_svc.fit(X, y)
    predictions = multi_target_svc.predict(X)
    assert_equal((n_samples, n_outputs), predictions.shape)
    # train the OvR classifier with each column and assert that predictions
    # are equal
    for i in range(3):
        multi_class_svc_ = clone(multi_class_svc)  # create a clone
        multi_class_svc_.fit(X, y[:, i])
        assert_equal(list(multi_class_svc_.predict(X)),
                     list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
    """predict_proba is pinned against known LogisticRegression output."""
    seed = 542
    # make test deterministic
    rng = np.random.RandomState(seed)
    # random features
    X = rng.normal(size=(5, 5))
    # random labels
    y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1)  # 2 classes
    y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1)  # 3 classes
    Y = np.concatenate([y1, y2], axis=1)
    clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
    clf.fit(X, Y)
    y_result = clf.predict_proba(X)
    # hard-coded reference probabilities for the fixed seed above
    y_actual = [np.array([[0.23481764, 0.76518236],
                          [0.67196072, 0.32803928],
                          [0.54681448, 0.45318552],
                          [0.34883923, 0.65116077],
                          [0.73687069, 0.26312931]]),
                np.array([[0.5171785, 0.23878628, 0.24403522],
                          [0.22141451, 0.64102704, 0.13755846],
                          [0.16751315, 0.18256843, 0.64991843],
                          [0.27357372, 0.55201592, 0.17441036],
                          [0.65745193, 0.26062899, 0.08191907]])]
    for i in range(len(y_actual)):
        assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
    """Integer sample weights must act like sample repetition."""
    # weighted classifier
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3, 2], [2, 3]]
    w = np.asarray([2., 1.])
    forest = RandomForestClassifier(n_estimators=10, random_state=1)
    clf_w = MultiOutputClassifier(forest)
    clf_w.fit(Xw, yw, w)
    # unweighted, but with repeated samples
    X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y = [[3, 2], [3, 2], [2, 3]]
    forest = RandomForestClassifier(n_estimators=10, random_state=1)
    clf = MultiOutputClassifier(forest)
    clf.fit(X, y)
    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
    """Weighted fit must act like sample repetition (SGD base estimator)."""
    # weighted classifier
    Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
    yw = [[3, 2], [2, 3], [3, 2]]
    w = np.asarray([2., 1., 1.])
    sgd_linear_clf = SGDClassifier(random_state=1)
    clf_w = MultiOutputClassifier(sgd_linear_clf)
    clf_w.fit(Xw, yw, w)
    # unweighted, but with repeated samples
    X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
    y = [[3, 2], [3, 2], [2, 3], [3, 2]]
    sgd_linear_clf = SGDClassifier(random_state=1)
    clf = MultiOutputClassifier(sgd_linear_clf)
    clf.fit(X, y)
    X_test = [[1.5, 2.5, 3.5]]
    assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
    """Unfitted use and mismatched output counts must raise."""
    # NotFittedError when fit is not done but score, predict and
    # predict_proba are called
    moc = MultiOutputClassifier(LinearSVC(random_state=0))
    assert_raises(NotFittedError, moc.predict, y)
    assert_raises(NotFittedError, moc.predict_proba, y)
    assert_raises(NotFittedError, moc.score, X, y)
    # ValueError when number of outputs is different
    # for fit and score
    y_new = np.column_stack((y1, y2))
    moc.fit(X, y)
    assert_raises(ValueError, moc.score, X, y_new)
| bsd-3-clause |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 46 | 6682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
  """Tests feature_engineering_fn."""
  def testFeatureEngineeringFn(self):
    """feature_engineering_fn output replaces both features and labels."""
    def input_fn():
      # raw features/labels fed to the estimator
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }
    def feature_engineering_fn(features, labels):
      # ignore the raw tensors and emit entirely new ones
      _, _ = features, labels
      return {
          "transformed_x": constant_op.constant([9.])
      }, {
          "transformed_y": constant_op.constant([99.])
      }
    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["transformed_x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step
    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn,
        steps=1,
        metrics={
            "label": metric_spec.MetricSpec(lambda predictions, labels: labels)
        })
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])
  def testFeatureEngineeringFnWithSameName(self):
    """Re-using the raw feature names inside feature_engineering_fn works."""
    def input_fn():
      return {
          "x": constant_op.constant(["9."])
      }, {
          "y": constant_op.constant(["99."])
      }
    def feature_engineering_fn(features, labels):
      # Github #12205: raise a TypeError if called twice.
      _ = string_ops.string_split(features["x"])
      features["x"] = constant_op.constant([9.])
      labels["y"] = constant_op.constant([99.])
      return features, labels
    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step
    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn,
        steps=1,
        metrics={
            "label": metric_spec.MetricSpec(lambda predictions, labels: labels)
        })
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])
  def testNoneFeatureEngineeringFn(self):
    """Estimators with and without feature_engineering_fn should differ."""
    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }
    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      return {
          "x": constant_op.constant([9.])
      }, {
          "y": constant_op.constant([99.])
      }
    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step
    estimator_with_fe_fn = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
    estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
    estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
    # predictions = x
    prediction_with_fe_fn = next(
        estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertEqual(9., prediction_with_fe_fn)
    prediction_without_fe_fn = next(
        estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
  """Custom optimizer tests."""
  def testIrisMomentum(self):
    """A DNNClassifier using a custom Momentum optimizer should learn iris."""
    random.seed(42)
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)
    def custom_optimizer():
      # callable returning the optimizer, as accepted by DNNClassifier
      return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    classifier = learn.DNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        optimizer=custom_optimizer,
        config=learn.RunConfig(tf_random_seed=1))
    classifier.fit(x_train, y_train, steps=400)
    predictions = np.array(list(classifier.predict_classes(x_test)))
    score = accuracy_score(y_test, predictions)
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
MohammedWasim/Data-Science-45min-Intros | support-vector-machines-101/svm-example.py | 26 | 2219 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
    import seaborn as sns
except ImportError as e:
    sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
# Optional CLI argument: number of samples; larger runs use a wider spread.
if len(sys.argv) > 1:
    samples = int( sys.argv[1] )
    c_std=2.0
else:
    samples = 10
    c_std=1.0
# two Gaussian blobs to separate with a linear SVM
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins (w . x + b = 0 rewritten as y = a*x - b/w1)
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
            s=100,
            c=y,
            alpha=0.8,
            cmap=plt.cm.Paired
            )
# plot support vectors (hollow rings around the relevant points)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=300,
            facecolors='none'
            )
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
| unlicense |
hannwoei/paparazzi | sw/tools/calibration/calibration_utils.py | 19 | 9087 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
    """Return the list of aircraft ids (ac_id) found in a log file.

    Each log line is expected to look like "<timestamp> <ac_id> <MSG> ...";
    the second whitespace-separated token is taken as the id.  Ids are
    returned in order of first appearance, without duplicates.
    """
    pattern = re.compile(r"\S+ (\S+)")
    ids = []
    # context manager guarantees the file handle is closed; iterating the
    # file (instead of breaking on the first empty readline) also fixes the
    # old behavior of silently truncating the log at a blank line
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                ac_id = m.group(1)
                if ac_id not in ids:
                    ids.append(ac_id)
    return ids
def read_log(ac_id, filename, sensor):
    """Extract raw sensor measurements from a log.

    Matches lines of the form "<time> <ac_id> IMU_<sensor>_RAW x y z" and
    returns the x/y/z values as a float array of shape (n, 3).
    """
    pattern = re.compile(r"(\S+) " + ac_id + " IMU_" + sensor +
                         r"_RAW (\S+) (\S+) (\S+)")
    list_meas = []
    # context manager closes the file; iterating the file fixes the old
    # behavior of stopping at the first blank line instead of at EOF
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)),
                                  float(m.group(4))])
    return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
    """Extract raw magnetometer and current measurements from a log.

    Matches "<time> <ac_id> IMU_MAG_CURRENT_CALIBRATION mx my mz amps" lines
    and returns a float array of shape (n, 4).
    """
    pattern = re.compile(r"(\S+) " + ac_id +
                         r" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
    list_meas = []
    # context manager closes the file; iterating the file fixes the old
    # behavior of stopping at the first blank line instead of at EOF
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)),
                                  float(m.group(4)), float(m.group(5))])
    return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
    """Select only non-noisy measurements.

    A sample is kept when the per-axis standard deviation of the window of
    +/- window_size samples around it has a norm below noise_threshold.
    Returns (kept measurements as an array, list of their indices).
    """
    kept_rows = []
    kept_indices = []
    upper = len(meas) - window_size
    for idx in range(window_size, upper):
        window = meas[idx - window_size:idx + window_size, :]
        per_axis_noise = window.std(axis=0)
        if linalg.norm(per_axis_noise) < noise_threshold:
            kept_rows.append(meas[idx, :])
            kept_indices.append(idx)
    return np.array(kept_rows), kept_indices
def get_min_max_guess(meas, scale):
    """Initial calibration guess from the per-axis extrema.

    Returns [nx, ny, nz, sx, sy, sz]: the neutral point is the midpoint of
    each axis' range, the sensitivity maps that range onto [-scale, scale].
    """
    hi = meas[:, :].max(axis=0)
    lo = meas[:, :].min(axis=0)
    neutral = (hi + lo) / 2
    sens = 2 * scale / (hi - lo)
    return np.concatenate((neutral, sens))
def scale_measurements(meas, p):
    """Apply the calibration p to every measurement.

    p = [neutral_x, neutral_y, neutral_z, sens_x, sens_y, sens_z]; each row
    becomes (row - neutral) * sensitivity.  Returns (scaled rows, norms).
    """
    scaled = [(row - p[0:3]) * p[3:6] for row in meas[:, ]]
    norms = [linalg.norm(row) for row in scaled]
    return np.array(scaled), np.array(norms)
def estimate_mag_current_relation(meas):
    """Linear coefficient of the magnetometer-vs-current relation per axis.

    meas columns are [mag_x, mag_y, mag_z, current]; returns the regression
    slope of each magnetometer axis against the current column.
    """
    current = meas[:, 3]
    # linregress returns (slope, intercept, r, p, stderr); keep the slope
    return [stats.linregress(current, meas[:, axis])[0] for axis in range(3)]
def print_xml(p, sensor, res):
    """Print the airframe-file XML calibration block for *sensor*.

    p = [nx, ny, nz, sx, sy, sz]; neutrals are rounded to integers and the
    sensitivities are scaled by 2**res for fixed-point use.
    """
    print("")
    for axis_name, neutral in zip(("X", "Y", "Z"), p[0:3]):
        print("<define name=\"" + sensor + "_" + axis_name +
              "_NEUTRAL\" value=\"" + str(int(round(neutral))) + "\"/>")
    for axis_name, sens in zip(("X", "Y", "Z"), p[3:6]):
        print("<define name=\"" + sensor + "_" + axis_name +
              "_SENS\" value=\"" + str(sens * 2 ** res) +
              "\" integer=\"16\"/>")
def plot_results(block, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref):
    """Plot calibration results.

    block       -- if True, plt.show() blocks until the figure is closed
    measurements-- raw sensor readings, shape (n, 3)
    flt_idx, flt_meas -- indices and values kept by the noise filter
    cp0, np0    -- scaled measurements and their norms for the initial guess
    cp1, np1    -- scaled measurements and their norms after optimization
    sensor_ref  -- expected norm of a correctly calibrated measurement
    """
    # raw data, with the filtered (kept) samples marked in red
    plt.subplot(3, 1, 1)
    plt.plot(measurements[:, 0])
    plt.plot(measurements[:, 1])
    plt.plot(measurements[:, 2])
    plt.plot(flt_idx, flt_meas[:, 0], 'ro')
    plt.plot(flt_idx, flt_meas[:, 1], 'ro')
    plt.plot(flt_idx, flt_meas[:, 2], 'ro')
    plt.xlabel('time (s)')
    plt.ylabel('ADC')
    plt.title('Raw sensors')
    # initial guess: scaled components (left) and their norm (right),
    # with +/- sensor_ref reference lines
    plt.subplot(3, 2, 3)
    plt.plot(cp0[:, 0])
    plt.plot(cp0[:, 1])
    plt.plot(cp0[:, 2])
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.subplot(3, 2, 4)
    plt.plot(np0)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    # optimized estimate: scaled components (left) and their norm (right)
    plt.subplot(3, 2, 5)
    plt.plot(cp1[:, 0])
    plt.plot(cp1[:, 1])
    plt.plot(cp1[:, 2])
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.subplot(3, 2, 6)
    plt.plot(np1)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    # if we want to have another plot we only draw the figure (non-blocking)
    # also in matplotlib before 1.0.0 there is only one call to show possible
    if block:
        plt.show()
    else:
        plt.draw()
def plot_mag_3d(measured, calibrated, p):
    """Plot magnetometer measurements on 3D sphere.

    measured   -- raw measurements, shape (n, 3)
    calibrated -- the same measurements after applying the calibration p
    p          -- [neutral_x, neutral_y, neutral_z, sens_x, sens_y, sens_z]
    """
    # set up points for sphere and ellipsoid wireframes
    u = np.r_[0:2 * np.pi:20j]
    v = np.r_[0:np.pi:20j]
    wx = np.outer(cos(u), sin(v))
    wy = np.outer(sin(u), sin(v))
    wz = np.outer(np.ones(np.size(u)), cos(v))
    ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
    ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
    ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
    # measurements
    mx = measured[:, 0]
    my = measured[:, 1]
    mz = measured[:, 2]
    # calibrated values
    cx = calibrated[:, 0]
    cy = calibrated[:, 1]
    cz = calibrated[:, 2]
    # axes size
    left = 0.02
    bottom = 0.05
    width = 0.46
    height = 0.9
    rect_l = [left, bottom, width, height]
    rect_r = [left/2+0.5, bottom, width, height]
    fig = plt.figure(figsize=plt.figaspect(0.5))
    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_l)
    else:
        ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
    # plot measurements
    # NOTE: the plt.hold(True) calls that used to follow the scatter calls
    # were removed: hold-on is the default behavior and pyplot.hold was
    # dropped in matplotlib 3.0, where calling it raised AttributeError.
    ax.scatter(mx, my, mz)
    # plot line from center to ellipsoid center
    ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
    # plot ellipsoid
    ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
    Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
    Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
    Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
    # add the fake bounding box:
    for xb, yb, zb in zip(Xb, Yb, Zb):
        ax.plot([xb], [yb], [zb], 'w')
    ax.set_title('MAG raw with fitted ellipsoid and center offset')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_r)
    else:
        ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
    ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
    ax.scatter(cx, cy, cz)
    ax.set_title('MAG calibrated on unit sphere')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d(-1, 1)
    ax.set_ylim3d(-1, 1)
    ax.set_zlim3d(-1, 1)
    plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
    """Read a turntable log.

    Returns an array whose first column is the most recent turntable rate
    and whose next three columns are the raw gyro measurements, keeping
    only gyro samples taken while _min < turntable rate < _max.
    """
    pattern_g = re.compile(r"(\S+) " + str(ac_id) + r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) " + str(tt_id) + r" IMU_TURNTABLE (\S+)")
    last_tt = None
    list_tt = []
    # context manager closes the file; iterating the file fixes the old
    # behavior of stopping at the first blank line instead of at EOF
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            m = re.match(pattern_t, line)
            if m:
                last_tt = float(m.group(2))
            m = re.match(pattern_g, line)
            # NOTE(review): `last_tt and` also skips a turntable rate of
            # exactly 0.0 (falsy); kept as in the original implementation
            if m and last_tt and _min < last_tt < _max:
                list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_tt)
| gpl-2.0 |
loli/sklearn-ensembletrees | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
    """Create a temporary data home and expose it to the doctest namespace."""
    # Create a temporary folder for the data fetcher
    global custom_data_home
    custom_data_home = tempfile.mkdtemp()
    makedirs(join(custom_data_home, 'mldata'))
    globs['custom_data_home'] = custom_data_home
    return globs
def setup_module():
    """Install mldata mocks so the doctests never hit the network."""
    # setup mock urllib2 module to avoid downloading from mldata.org
    install_mldata_mock({
        'mnist-original': {
            'data': np.empty((70000, 784)),
            'label': np.repeat(np.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': np.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
    })
def teardown_module():
    """Remove the mldata mock and delete the temporary data home."""
    uninstall_mldata_mock()
    shutil.rmtree(custom_data_home)
| bsd-3-clause |
CVML/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.

    # Fit the pipeline on the training set using grid search for the parameters

    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted

    # NOTE: this file is an exercise skeleton -- ``y_predicted`` is
    # intentionally undefined until the TASK above is completed, so running
    # the script as-is raises NameError on the next line.

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/ensemble/gradient_boosting.py | 1 | 40371 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD Style.
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
import sys
import warnings
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, array2d, check_arrays
from ..utils.extmath import logsumexp
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import _random_sample_mask
from ..tree._tree import DTYPE, TREE_LEAF
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
class QuantileEstimator(BaseEstimator):
    """Constant predictor returning the alpha-quantile of the training targets."""

    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0)")
        self.alpha = alpha

    def fit(self, X, y):
        # Only the requested percentile of y matters; X is ignored.
        self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)

    def predict(self, X):
        # One column of identical predictions, one row per sample.
        n_samples = X.shape[0]
        return np.full((n_samples, 1), self.quantile, dtype=np.float64)
class MeanEstimator(BaseEstimator):
    """Constant predictor returning the mean of the training targets."""

    def fit(self, X, y):
        # Only the target average matters; X is ignored.
        self.mean = np.mean(y)

    def predict(self, X):
        # One column of identical predictions, one row per sample.
        return np.full((X.shape[0], 1), self.mean, dtype=np.float64)
class LogOddsEstimator(BaseEstimator):
    """Constant predictor returning the prior log odds ratio.

    Used as the default ``init`` estimator for binomial deviance loss.
    """

    def fit(self, X, y):
        """Compute ``log(n_pos / n_neg)`` from binary targets ``y``.

        ``X`` is ignored.  Raises ValueError when ``y`` contains only a
        single class: in that case the log odds would be +-inf and the
        original code silently propagated non-finite predictions
        (``log(0)`` or a zero denominator).
        """
        n_pos = np.sum(y)
        n_neg = y.shape[0] - n_pos
        if n_pos == 0 or n_neg == 0:
            raise ValueError("y contains non binary labels.")
        self.prior = np.log(n_pos / n_neg)

    def predict(self, X):
        """Return the prior log-odds for every row of ``X``."""
        y = np.empty((X.shape[0], 1), dtype=np.float64)
        y.fill(self.prior)
        return y
class PriorProbabilityEstimator(BaseEstimator):
    """Constant predictor returning the empirical class priors.

    Predicts, for every sample, the relative frequency of each class
    observed in the training targets.
    """

    def fit(self, X, y):
        # Relative frequency of each integer label in y; X is ignored.
        counts = np.bincount(y)
        self.priors = counts / float(y.shape[0])

    def predict(self, X):
        # Broadcast the prior vector to one row per sample.
        n_samples = X.shape[0]
        y = np.empty((n_samples, self.priors.shape[0]), dtype=np.float64)
        y[:] = self.priors
        return y
class LossFunction(object):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """
    __metaclass__ = ABCMeta

    # Subclasses set this to True when one tree per class is fit each stage.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self, X, y):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ---------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_mask, learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : np.ndarray, shape=(n, m)
            The data array.
        y : np.ndarray, shape=(n,)
            The target labels.
        residual : np.ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)

        # mask all which are not in sample mask.
        # Out-of-bag rows get region -1, so the ``== leaf`` comparison in
        # ``_update_terminal_region`` excludes them from the line search.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k])

        # update predictions (both in-bag and out-of-bag)
        # Uses the UNmasked regions so every sample receives the shrunken
        # value of the leaf it falls into.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(LossFunction):
    """Base class for regression loss functions.

    Regression losses operate on a single output, hence ``n_classes``
    must always be 1.
    """
    __metaclass__ = ABCMeta

    def __init__(self, n_classes):
        if n_classes != 1:
            raise ValueError("``n_classes`` must be 1 for regression")
        super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.

    Terminal regions need not to be updated for least squares: the leaf
    means fitted by the regression tree already minimize squared error.
    """

    def init_estimator(self):
        return MeanEstimator()

    def __call__(self, y, pred):
        # Mean squared error between targets and flattened predictions.
        residual = y - pred.ravel()
        return np.mean(residual ** 2.0)

    def negative_gradient(self, y, pred, **kargs):
        # d/dpred of 0.5 * (y - pred)^2 gives the plain residual.
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_mask, learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.

        But it has to update the predictions.
        """
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        # Leaf values already minimize squared error; nothing to do.
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression."""

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred):
        # Mean absolute error.
        return np.abs(y - pred.ravel()).mean()

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        # Sign of the residual, mapped from {False, True} to {-1.0, 1.0}.
        positive = (y - pred.ravel() > 0.0)
        return 2.0 * positive - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """LAD updates terminal regions to median estimates. """
        in_leaf = np.where(terminal_regions == leaf)[0]
        diff = (y.take(in_leaf, axis=0) -
                pred.take(in_leaf, axis=0))
        tree.value[leaf, 0, 0] = np.median(diff)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    Quadratic for residuals whose magnitude is below ``gamma`` (the
    ``alpha``-quantile of the absolute residuals) and linear beyond,
    which limits the influence of outliers (M-regression, Friedman 2001).

    The original docstring wrongly described this class as LAD regression.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        self.alpha = alpha
        # Transition point between the quadratic and linear regimes;
        # estimated from the residuals in ``negative_gradient``.
        self.gamma = None

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred):
        """Compute the Huber loss of ``pred`` w.r.t. ``y``."""
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            # ``negative_gradient`` has not run yet (e.g. the loss is
            # evaluated on the initial model).  The original code read
            # ``self.gamma`` unconditionally and crashed with
            # AttributeError here; estimate gamma from the current
            # residuals instead.
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
        lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
        return (sq_loss + lin_loss) / y.shape[0]

    def negative_gradient(self, y, pred, **kargs):
        """Clipped residuals: raw inside +-gamma, ``gamma * sign`` outside."""
        pred = pred.ravel()
        diff = y - pred
        gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        # Remember gamma so that __call__ and _update_terminal_region use
        # the same transition point as this gradient step.
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Set the leaf to the one-step Huber location estimate:
        median plus mean of the gamma-clipped deviations from the median.
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        gamma = self.gamma
        diff = y.take(terminal_region, axis=0) - \
            pred.take(terminal_region, axis=0)
        median = np.median(diff)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # Validate with an explicit exception instead of ``assert`` so the
        # check survives ``python -O`` and matches QuantileEstimator.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0)")
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred):
        """Pinball loss: alpha-weighted positive residuals plus
        (1 - alpha)-weighted negative ones, averaged over samples.
        """
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        return (alpha * diff[mask].sum() +
                (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]

    def negative_gradient(self, y, pred, **kargs):
        # Subgradient of the pinball loss: alpha where under-predicting,
        # -(1 - alpha) where over-predicting.
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Set the leaf to the alpha-percentile of the residuals in the
        region (the original docstring wrongly said "median" / LAD).
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = y.take(terminal_region, axis=0) - \
            pred.take(terminal_region, axis=0)
        val = stats.scoreatpercentile(diff, self.percentile)
        tree.value[leaf, 0] = val
class BinomialDeviance(LossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("%s requires 2 classes." %
                             self.__class__.__name__)
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        return LogOddsEstimator()

    def __call__(self, y, pred):
        """Compute the deviance (= negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v)), numerically stable.
        margin = -2 * y * pred.ravel()
        return np.sum(np.logaddexp(0.0, margin)) / y.shape[0]

    def negative_gradient(self, y, pred, **kargs):
        # Residual between labels and predicted probabilities sigmoid(pred).
        return y - 1.0 / (1.0 + np.exp(-pred.ravel()))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Make a single Newton-Raphson step. """
        in_leaf = np.where(terminal_regions == leaf)[0]
        leaf_residual = residual.take(in_leaf, axis=0)
        leaf_y = y.take(in_leaf, axis=0)

        numerator = leaf_residual.sum()
        denominator = np.sum((leaf_y - leaf_residual) *
                             (1 - leaf_y + leaf_residual))

        # Guard against a vanishing Hessian (e.g. a pure leaf).
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
class MultinomialDeviance(LossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """
    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("%s requires more than 2 classes."
                             % self.__class__.__name__)
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        return PriorProbabilityEstimator()

    def __call__(self, y, pred):
        # One-hot encode the integer labels.
        n_samples = y.shape[0]
        Y = np.zeros((n_samples, self.K), dtype=np.float64)
        for klass in range(self.K):
            Y[:, klass] = y == klass

        # Cross-entropy, computed via log-sum-exp for numerical stability.
        return np.sum(logsumexp(pred, axis=1) - (Y * pred).sum(axis=1))

    def negative_gradient(self, y, pred, k=0):
        """Compute negative gradient for the ``k``-th class. """
        # Residual between labels and the softmax probability of class k.
        log_softmax_k = pred[:, k] - logsumexp(pred, axis=1)
        return y - np.nan_to_num(np.exp(log_softmax_k))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Make a single Newton-Raphson step. """
        in_leaf = np.where(terminal_regions == leaf)[0]
        leaf_residual = residual.take(in_leaf, axis=0)
        leaf_y = y.take(in_leaf, axis=0)

        # Multi-class correction factor (K - 1) / K on the numerator.
        numerator = leaf_residual.sum() * ((self.K - 1) / self.K)
        denominator = np.sum((leaf_y - leaf_residual) *
                             (1.0 - leaf_y + leaf_residual))

        # Guard against a vanishing Hessian (e.g. a pure leaf).
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
# Registry mapping the public ``loss`` parameter string to its implementing
# class.  'deviance' is resolved at fit time by GradientBoostingClassifier
# to 'bdeviance' or 'mdeviance' depending on the number of classes, hence
# the ``None`` placeholder.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'bdeviance': BinomialDeviance,
                  'mdeviance': MultinomialDeviance,
                  'deviance': None}  # for both, multinomial and binomial
class BaseGradientBoosting(BaseEnsemble):
    """Abstract base class for Gradient Boosting. """
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
                 min_samples_leaf, max_depth, init, subsample, max_features,
                 random_state, alpha=0.9, verbose=0, learn_rate=None):
        # ``learn_rate`` is a deprecated alias; when supplied it silently
        # overrides ``learning_rate`` and emits a DeprecationWarning.
        if not learn_rate is None:
            learning_rate = learn_rate
            warnings.warn(
                "Parameter learn_rate has been renamed to "
                'learning_rate'" and will be removed in release 0.14.",
                DeprecationWarning, stacklevel=2)
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.loss = loss
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.subsample = subsample
        self.max_features = max_features
        self.max_depth = max_depth
        self.init = init
        self.random_state = random_state
        self.alpha = alpha
        self.verbose = verbose
        # (n_stages, K) object array of fitted trees; empty until ``fit``.
        self.estimators_ = np.empty((0, 0), dtype=np.object)

    def _fit_stage(self, i, X, X_argsorted, y, y_pred, sample_mask):
        """Fit another stage of ``n_classes_`` trees to the boosting model. """
        loss = self.loss_
        original_y = y

        # One tree per output: K == 1 for regression/binary classification.
        for k in range(loss.K):
            if loss.is_multi_class:
                # one-vs-rest binary target for class ``k``
                y = np.array(original_y == k, dtype=np.float64)

            residual = loss.negative_gradient(y, y_pred, k=k)

            # induce regression tree on residuals
            tree = DecisionTreeRegressor(
                criterion="mse",
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_density=self.min_density,
                max_features=self.max_features,
                compute_importances=False,
                random_state=self.random_state)

            tree.fit(X, residual, sample_mask, X_argsorted, check_input=False)

            # update tree leaves (line search) and the running predictions
            loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                         sample_mask, self.learning_rate, k=k)

            # add tree to ensemble
            self.estimators_[i, k] = tree

        return y_pred

    def fit(self, X, y):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features. Use fortran-style
            to avoid memory copies.

        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes
            ``0, 1, ..., n_classes_-1``

        Returns
        -------
        self : object
            Returns self.
        """
        # Check input
        X, y = check_arrays(X, y, sparse_format='dense')
        X = np.asfortranarray(X, dtype=DTYPE)
        y = np.ravel(y, order='C')

        # Check parameters
        n_samples, n_features = X.shape
        self.n_features = n_features

        if self.n_estimators <= 0:
            raise ValueError("n_estimators must be greater than 0")
        if self.learning_rate <= 0.0:
            raise ValueError("learning_rate must be greater than 0")

        if self.loss not in LOSS_FUNCTIONS:
            raise ValueError("Loss '%s' not supported. " % self.loss)

        # ``n_classes_`` is set by the concrete subclass before this runs.
        loss_class = LOSS_FUNCTIONS[self.loss]
        if self.loss in ('huber', 'quantile'):
            self.loss_ = loss_class(self.n_classes_, self.alpha)
        else:
            self.loss_ = loss_class(self.n_classes_)

        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be larger than 0")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be larger than 0")
        if self.subsample <= 0.0 or self.subsample > 1:
            raise ValueError("subsample must be in (0,1]")

        # NOTE(review): the public ``max_features``, ``init`` and
        # ``random_state`` parameters are overwritten below with resolved
        # values, so get_params after fit no longer returns the originals.
        if self.max_features is None:
            self.max_features = n_features
        if not (0 < self.max_features <= n_features):
            raise ValueError("max_features must be in (0, n_features]")
        if self.max_depth <= 0:
            raise ValueError("max_depth must be larger than 0")

        if self.init is not None:
            if (not hasattr(self.init, 'fit')
                    or not hasattr(self.init, 'predict')):
                raise ValueError("init must be valid estimator")
        else:
            self.init = self.loss_.init_estimator()

        if not (0.0 < self.alpha and self.alpha < 1.0):
            raise ValueError("alpha must be in (0.0, 1.0)")

        self.random_state = check_random_state(self.random_state)

        # use default min_density (0.1) only for deep trees
        self.min_density = 0.0 if self.max_depth < 6 else 0.1

        # create argsorted X for fast tree induction
        X_argsorted = np.asfortranarray(
            np.argsort(X.T, axis=1).astype(np.int32).T)

        # fit initial model
        self.init.fit(X, y)

        # init predictions
        y_pred = self.init.predict(X)

        self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                    dtype=np.object)

        self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
        self.oob_score_ = np.zeros((self.n_estimators), dtype=np.float64)

        sample_mask = np.ones((n_samples,), dtype=np.bool)
        n_inbag = max(1, int(self.subsample * n_samples))

        self.random_state = check_random_state(self.random_state)

        # perform boosting iterations
        for i in range(self.n_estimators):

            # subsampling
            if self.subsample < 1.0:
                # TODO replace with ``np.choice`` if possible.
                sample_mask = _random_sample_mask(n_samples, n_inbag,
                                                 self.random_state)

            # fit next stage of trees
            y_pred = self._fit_stage(i, X, X_argsorted, y, y_pred, sample_mask)

            # track deviance (= loss)
            if self.subsample < 1.0:
                self.train_score_[i] = self.loss_(y[sample_mask],
                                                  y_pred[sample_mask])
                self.oob_score_[i] = self.loss_(y[~sample_mask],
                                                y_pred[~sample_mask])
                if self.verbose > 1:
                    print("built tree %d of %d, train score = %.6e, "
                          "oob score = %.6e" % (i + 1, self.n_estimators,
                                                self.train_score_[i],
                                                self.oob_score_[i]))
            else:
                # no need to fancy index w/ no subsampling
                self.train_score_[i] = self.loss_(y, y_pred)
                if self.verbose > 1:
                    print("built tree %d of %d, train score = %.6e" %
                          (i + 1, self.n_estimators, self.train_score_[i]))
            if self.verbose == 1:
                # prints a '.' progress marker (empty string, '.' terminator)
                print(end='.')
                sys.stdout.flush()

        return self

    def _make_estimator(self, append=True):
        # we don't need _make_estimator
        raise NotImplementedError()

    def _init_decision_function(self, X):
        """Check input and compute prediction of ``init``. """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, call `fit` "
                             "before making predictions`.")
        if X.shape[1] != self.n_features:
            raise ValueError("X.shape[1] should be %d, not %d." %
                             (self.n_features, X.shape[1]))
        score = self.init.predict(X).astype(np.float64)
        return score

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. Classes are
            ordered by arithmetical order. Regression and binary
            classification are special cases with ``k == 1``,
            otherwise ``k==n_classes``.
        """
        X = array2d(X, dtype=DTYPE, order='C')
        score = self._init_decision_function(X)
        # Cython routine: adds every stage's shrunken tree prediction.
        predict_stages(self.estimators_, X, self.learning_rate, score)
        return score

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. Classes are
            ordered by arithmetical order. Regression and binary
            classification are special cases with ``k == 1``,
            otherwise ``k==n_classes``.
        """
        X = array2d(X, dtype=DTYPE, order='C')
        score = self._init_decision_function(X)
        for i in range(self.n_estimators):
            # Accumulates in place; the same array object is yielded each
            # iteration with one more stage's contribution added.
            predict_stage(self.estimators_, i, X, self.learning_rate, score)
            yield score

    @property
    def feature_importances_(self):
        # Gini importances averaged over all trees of all stages.
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")
        total_sum = np.zeros((self.n_features, ), dtype=np.float64)
        for stage in self.estimators_:
            stage_sum = sum(
                tree.tree_.compute_feature_importances(method='gini')
                for tree in stage) / len(stage)
            total_sum += stage_sum

        importances = total_sum / len(self.estimators_)
        return importances
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.

    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.

    Parameters
    ----------
    loss : {'deviance'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs.

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.

    min_samples_split : integer, optional (default=1)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, None, optional (default=None)
        The number of features to consider when looking for the best split.
        Features are choosen randomly at each split point.
        If None, then `max_features=n_features`. Choosing
        `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints '.' for every tree built.
        If greater than 1 then it prints the score for every tree.

    Attributes
    ----------
    `feature_importances_` : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    `oob_score_` : array, shape = [n_estimators]
        Score of the training dataset obtained using an out-of-bag estimate.
        The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the out-of-bag sample.

    `train_score_` : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    `loss_` : LossFunction
        The concrete ``LossFunction`` object.

    `init` : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    `estimators_`: list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier().fit(samples, labels)
    >>> print(gb.predict([[0.5, 0, 0]]))
    [0]

    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=1, min_samples_leaf=1,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0, learn_rate=None):

        super(GradientBoostingClassifier, self).__init__(
            loss, learning_rate, n_estimators, min_samples_split,
            min_samples_leaf, max_depth, init, subsample, max_features,
            random_state, verbose=verbose, learn_rate=learn_rate)

    def fit(self, X, y):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features. Use fortran-style
            to avoid memory copies.

        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes
            ``0, 1, ..., n_classes_-1``

        Returns
        -------
        self : object
            Returns self.
        """
        self.classes_ = np.unique(y)
        self.n_classes_ = len(self.classes_)
        # encode labels as contiguous class indices 0..n_classes_-1
        y = np.searchsorted(self.classes_, y)
        if self.loss == 'deviance':
            # NOTE(review): this overwrites the public ``loss`` parameter,
            # so a subsequent refit sees 'mdeviance'/'bdeviance' instead of
            # 'deviance' -- confirm this is intended.
            self.loss = 'mdeviance' if len(self.classes_) > 2 else 'bdeviance'

        return super(GradientBoostingClassifier, self).fit(X, y)

    def _score_to_proba(self, score):
        """Compute class probability estimates from decision scores. """
        proba = np.ones((score.shape[0], self.n_classes_), dtype=np.float64)
        if not self.loss_.is_multi_class:
            # binary: sigmoid of the single score column
            proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
            proba[:, 0] -= proba[:, 1]
        else:
            # multi-class: softmax via the log-sum-exp trick
            proba = np.nan_to_num(
                np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
        return proba

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        score = self.decision_function(X)
        return self._score_to_proba(score)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self.staged_decision_function(X):
            yield self._score_to_proba(score)

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        proba = self.predict_proba(X)
        # map argmax indices back to the original class labels
        return self.classes_.take(np.argmax(proba, axis=1), axis=0)

    def staged_predict(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for proba in self.staged_predict_proba(X):
            yield self.classes_.take(np.argmax(proba, axis=1), axis=0)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function soley based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, None, optional (default=None)
The number of features to consider when looking for the best split.
Features are choosen randomly at each split point.
If None, then `max_features=n_features`. Choosing
`max_features < n_features` leads to a reduction of variance
and an increase in bias.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints '.' for every tree built.
If greater than 1 then it prints the score for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> gb = GradientBoostingRegressor().fit(samples, labels)
>>> print(gb.predict([[0, 0, 0]]))
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[ 1.32806...
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
             subsample=1.0, min_samples_split=1, min_samples_leaf=1,
             max_depth=3, init=None, random_state=None,
             max_features=None, alpha=0.9, verbose=0, learn_rate=None):
    # Delegate all configuration to the shared gradient-boosting base
    # class.  Note the positional order passed to the base initializer
    # differs from this signature: ``subsample`` goes after ``init``.
    # ``learn_rate`` is the deprecated spelling of ``learning_rate`` and
    # is forwarded only for backward compatibility.
    super(GradientBoostingRegressor, self).__init__(
        loss, learning_rate, n_estimators, min_samples_split,
        min_samples_leaf, max_depth, init, subsample, max_features,
        random_state, alpha, verbose, learn_rate=learn_rate)
def fit(self, X, y):
    """Fit the gradient boosting model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features. Use fortran-style
        to avoid memory copies.

    y : array-like, shape = [n_samples]
        Target values (integers in classification, real numbers in
        regression)
        For classification, labels must correspond to classes
        ``0, 1, ..., n_classes_-1``

    Returns
    -------
    self : object
        Returns self.
    """
    # Regression is treated internally as a single-output problem,
    # hence a fixed class count of 1 before delegating to the base fit.
    self.n_classes_ = 1
    return super(GradientBoostingRegressor, self).fit(X, y)
def predict(self, X):
    """Predict regression target for X.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted values.
    """
    # The decision function returns a column vector of scores;
    # flatten it to the conventional 1-D prediction array.
    scores = self.decision_function(X)
    return scores.ravel()
def staged_predict(self, X):
    """Predict regression target at each boosting stage for X.

    Useful for monitoring, e.g. computing the test-set error after
    every added tree.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted value of the input samples, one array per stage.
    """
    # Flatten each stage's column-vector scores to 1-D before yielding,
    # mirroring the non-staged predict().
    for stage_scores in self.staged_decision_function(X):
        yield stage_scores.ravel()
| unlicense |
mikebenfield/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5

###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # Use the builtin ``float`` instead of the removed ``np.float`` alias
    # (gone since NumPy 1.20); this also matches the structured plot below.
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)

###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)

plt.show()
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/extension/base/interface.py | 3 | 2284 | import numpy as np
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseInterfaceTests(BaseExtensionTests):
    """Tests that the basic interface is satisfied."""
    # ------------------------------------------------------------------------
    # Interface
    # ------------------------------------------------------------------------

    def test_len(self, data):
        # The ``data`` fixture contract provides exactly 100 elements.
        assert len(data) == 100

    def test_ndim(self, data):
        # Extension arrays are always one-dimensional.
        assert data.ndim == 1

    def test_can_hold_na_valid(self, data):
        # GH-20761
        assert data._can_hold_na is True

    def test_memory_usage(self, data):
        # Series.memory_usage (without index) must match the array's nbytes.
        s = pd.Series(data)
        result = s.memory_usage(index=False)
        assert result == s.nbytes

    def test_array_interface(self, data):
        # np.array() conversion must preserve elements, both with the
        # array's native dtype and with an explicit object dtype.
        result = np.array(data)
        assert result[0] == data[0]

        result = np.array(data, dtype=object)
        expected = np.array(list(data), dtype=object)
        tm.assert_numpy_array_equal(result, expected)

    def test_is_extension_array_dtype(self, data):
        # The dtype check must hold for the array, its dtype object, and
        # a Series wrapping the array.
        assert is_extension_array_dtype(data)
        assert is_extension_array_dtype(data.dtype)
        assert is_extension_array_dtype(pd.Series(data))
        assert isinstance(data.dtype, ExtensionDtype)

    def test_no_values_attribute(self, data):
        # GH-20735: EA's with .values attribute give problems with internal
        # code, disallowing this for now until solved
        assert not hasattr(data, 'values')
        assert not hasattr(data, '_values')

    def test_is_numeric_honored(self, data):
        # The block wrapping the array must report the dtype's numeric flag.
        result = pd.Series(data)
        assert result._data.blocks[0].is_numeric is data.dtype._is_numeric

    def test_isna_extension_array(self, data_missing):
        # If your `isna` returns an ExtensionArray, you must also implement
        # _reduce. At the *very* least, you must implement any and all
        na = data_missing.isna()
        if is_extension_array_dtype(na):
            assert na._reduce('any')
            assert na.any()

            assert not na._reduce('all')
            assert not na.all()

            assert na.dtype._is_boolean
| bsd-3-clause |
soravux/pms | pms.py | 1 | 7603 | #!/usr/bin/env python
import argparse
import json
import pickle
import numpy as np
from scipy.misc import imread
from scipy import sparse
from scipy import optimize
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import mesh
def getImage(filename):
    """Open image file in greyscale mode (intensity).

    Returns a 2-D float array of pixel intensities.

    NOTE(review): ``scipy.misc.imread`` was removed in SciPy >= 1.2; this
    only works with an old pinned SciPy (or requires porting to
    ``imageio.imread``) — confirm the environment before upgrading.
    """
    return imread(filename, flatten=True)
def getLightning(filename):
    """Load lighting directions from a JSON-formatted file.

    Returns the deserialized JSON content (typically a mapping from
    image filename to lighting vector).
    """
    with open(filename, 'r') as json_file:
        return json.load(json_file)
def photometricStereo(lightning_filename, images_filenames):
    """Calibrated photometric stereo, based on Woodham '79.

    I = Matrix of input images, rows being different images.
    N = lightning vectors
    N_i = inverse of N
    rho = albedo of each pixels

    Parameters
    ----------
    lightning_filename : str
        JSON file mapping each image filename to its lighting vector.
    images_filenames : sequence of str
        Paths of the input images (all the same size).

    Returns
    -------
    ndarray, shape (h, w, 3)
        Per-pixel surface normals (zero where the albedo is zero).
    """
    lightning = getLightning(lightning_filename)
    images = list(map(getImage, images_filenames))
    n = len(images_filenames)
    # Build explicit lists: np.vstack() requires a sequence and rejects
    # generators on modern NumPy.
    I = np.vstack([x.ravel() for x in images])
    output = np.zeros((3, I.shape[1]))
    N = np.vstack([lightning[x] for x in images_filenames])
    N_i = np.linalg.pinv(N)
    rho = np.linalg.norm(N_i.dot(I), axis=0)
    # Normalize intensities by the albedo; zero-albedo pixels are
    # excluded from the least-squares solve below.
    I = I / rho
    normals, residual, rank, s = np.linalg.lstsq(N, I[:, rho != 0].reshape(n, -1))
    output[:, rho != 0] = normals
    w, h = images[0].shape
    output = output.reshape(3, w, h).swapaxes(0, 2)
    # TODO: Raise an error on misbehavior of lstsq.
    return output
def photometricStereoWithoutLightning(images_filenames):
    """Uncalibrated photometric stereo, based on Basri and al 2006.

    Recovers a per-pixel normal field from images alone (unknown
    lighting), up to a scaled Lorentz ambiguity.

    Parameters
    ----------
    images_filenames : sequence of str
        Paths of the input images (same size, same viewpoint).

    Returns
    -------
    ndarray, shape (h, w, 3)
        Estimated (unnormalized) surface normals.
    """
    images = list(map(getImage, images_filenames))
    f = len(images_filenames)
    n = images[0].size
    w, h = images[0].shape

    # Comments are taken directly from Basri and al, 2006
    # Begin with a set of images, each composing a row of the matrix M.
    # Built from an explicit list: np.vstack() requires a sequence and
    # rejects generators on modern NumPy.
    M = np.vstack([x.ravel() for x in images])

    # Using SVD M= U \delta V^T, factor M = \widetilde{L} \widetilde{S}, where
    # \widetilde{L} = U \sqrt{ \delta ^{f4} } and
    # \widetilde{S} = \sqrt{ \delta ^{4n} } V^T
    print("Beginning image SVD")
    U, delta_vals, Vt = np.linalg.svd(M, full_matrices=False)
    delta = np.zeros((4, min(Vt.shape)))
    np.fill_diagonal(delta, delta_vals)
    print("delta x Vt")
    L = U.dot(np.sqrt(np.transpose(delta)))
    S = np.sqrt(delta).dot(Vt)

    # Normalise \widetilde{S} by scaling its rows so to have equal norms
    S_norms = np.linalg.norm(S, axis=1)
    norm_factor = np.average(S_norms[1:]) / S_norms[0]
    S[0, :] *= norm_factor
    L[:, 0] /= norm_factor

    # Construct Q. Each row of Q is constructed with quadratic terms computed
    # from a column of \widetilde{S}
    # [...] for a column \vec{q} in \widetilde{S} the corresponding row in Q is
    # (q_1^2, ... , q_4^2, 2 q_1 q_2, ... , 2 q_3 q_4)
    print("Building Q")
    Q1 = np.take(S, (0, 1, 2, 3, 0, 0, 0, 1, 1, 2), axis=0)
    Q2 = np.take(S, (0, 1, 2, 3, 1, 2, 3, 2, 3, 3), axis=0)
    Q = Q1 * Q2
    Q[:, 4:] *= 2
    Q = np.transpose(Q)

    # Using SVD, construct \widetilde{B} to approximate the null space of Q
    # (ie., solve Q \vec{b} = 0 and compose \widetilde{B} from the elements of
    # \vec{b}.
    print("Q SVD")
    UQ, SQ, VQ = np.linalg.svd(Q, full_matrices=False)
    b = VQ[:, 9]
    B = np.take(b.flat, (0, 4, 5, 6,
                         4, 1, 7, 8,
                         5, 7, 2, 9,
                         6, 8, 9, 3)).reshape((4, 4))

    # Construct \widetilde{A}
    print("Constructing A")
    B_eig = np.linalg.eigvals(B)
    B_eig_sn = np.sign(B_eig)
    nb_eig_sn_positive = np.sum(B_eig_sn[B_eig_sn > 0])
    nb_eig_sn_negative = np.sum(np.abs(B_eig_sn[B_eig_sn < 0]))
    if 1 in (nb_eig_sn_positive, nb_eig_sn_negative):
        # Eigenvalue signature (1, 3) or (3, 1): recover A directly from
        # the eigendecomposition of B (flipping sign if needed).
        if nb_eig_sn_positive == 1:
            B = -B
        Lambda, W = np.linalg.eigh(B)
        idx = np.argsort(Lambda)
        Lambda.sort()
        Lambda = np.abs(np.diag(Lambda))
        W = W[:, idx]
        A = np.sqrt(Lambda).dot(W.T)
    else:
        # Fallback: numerically minimize ||B - A^T J A||_F over A, with J
        # the Lorentz metric diag(-1, 1, 1, 1).
        J = np.eye(4)
        J[0, 0] = -1
        initial_guess = np.eye(4)
        for _ in range(2):
            def score(A):
                A = A.reshape(4, 4)
                return np.linalg.norm(B - A.T.dot(J).dot(A), 'fro')
            x = optimize.basinhopping(
                score,
                initial_guess,
                niter=100,
            )
            A = x.x.reshape(4, 4)
            initial_guess = A
            print(score(A))

    # Compute the structure \widetilde{A} \widetilde{S}, which provides the
    # scene structure up to a scaled Lorentz transformation
    print("A x S")
    structure = A.dot(S)

    # A Lorentz transform in matrix form multiplies by [ct x y z].T
    normals = structure[1:4, :]
    normals = np.transpose(normals.reshape(3, w, h), (1, 2, 0))
    # Flip the y axis to match image coordinates.
    normals[:, :, 1] *= -1
    return normals
def colorizeNormals(normals):
    """Generate an RGB image representing a normal field.

    Each vector is scaled to unit length, then its components are
    mapped linearly from [-1, 1] into the displayable range [0, 1].
    """
    magnitudes = np.linalg.norm(normals, axis=normals.ndim - 1)
    unit_normals = normals / np.dstack((magnitudes, magnitudes, magnitudes))
    return (unit_normals + 1) / 2
def generateNormalMap(dims=600):
    """Generate a mapping of the normals of a perfect sphere.

    Returns a (dims, dims, 3) color image; pixels falling outside the
    unit disc are painted black.
    """
    u, v = np.meshgrid(np.linspace(-1, 1, dims), np.linspace(-1, 1, dims))
    height_sq = 1 - np.power(u, 2) - np.power(v, 2)
    inside = height_sq >= 0
    height = np.zeros(u.shape)
    height[inside] = np.sqrt(height_sq[inside])
    # y is negated so the normals follow image (top-left origin) convention.
    sphere_normals = np.dstack([u, -v, height]).swapaxes(0, 1)
    img = colorizeNormals(sphere_normals)
    img[~inside] = 0
    return img
def main():
    """Command-line entry point: run photometric stereo and export results.

    Writes out.png (colored normals), out-3dn.stl and out-mesh.stl in the
    current directory, or map.png when --generate-map is given.
    """
    parser = argparse.ArgumentParser(
        description="Photometric Stereo",
    )
    parser.add_argument(
        "--lightning",
        nargs="?",
        help="Filename of JSON file containing lightning information",
    )
    parser.add_argument(
        "--mask",
        nargs="?",
        help="Filename of an image containing a mask of the object",
    )
    parser.add_argument(
        "image",
        nargs="*",
        help="Images filenames",
    )
    parser.add_argument(
        "--generate-map",
        action='store_true',
        help="Generate a map.png file which represends the colors of the "
        "normal mapping.",
    )
    args = parser.parse_args()

    if args.generate_map:
        # Only emit the reference sphere color map, then exit.
        normals = generateNormalMap()
        plt.imsave('map.png', normals)
        return

    # Photometric stereo needs at least 3 differently-lit views.
    if not len(args.image) >= 3:
        print("Please specify 3+ image files.")
        return

    if args.lightning:
        # Calibrated variant: lighting directions are known.
        normals = photometricStereo(args.lightning, args.image)
        if False:  # NOTE(review): dead debug code (result caching); never runs
            try:
                with open('data.pkl', 'rb') as fhdl:
                    normals = pickle.load(fhdl)
            except:  # NOTE(review): bare except — would hide real errors if re-enabled
                with open('data.pkl', 'wb') as fhdl:
                    pickle.dump(normals, fhdl)
    else:
        # Uncalibrated variant: lighting recovered from the images.
        normals = photometricStereoWithoutLightning(args.image)

    if args.mask:
        mask = getImage(args.mask)
        mask = mask.T
        print(normals.shape, mask.shape)
        # Invalidate pixels below the half-range threshold — presumably a
        # mid-intensity cut; note it ignores the minimum offset (TODO confirm).
        normals[mask<(mask.max() - mask.min())/2.] = np.nan
    color = colorizeNormals(normals)
    plt.imsave('out.png', color)
    mesh.write3dNormals(normals, 'out-3dn.stl')
    surface = mesh.surfaceFromNormals(normals)
    mesh.writeMesh(surface, normals, 'out-mesh.stl')
if __name__ == "__main__":
main() | mit |
stopfer/opengm | src/interfaces/python/opengm/benchmark/__init__.py | 14 | 2396 | import opengm
import os
import numpy
try:
import matplotlib.pyplot as plt
from matplotlib import pyplot
from matplotlib import pylab
except:
pass
class ModelResult(object):
    # NOTE(review): Python 2 syntax (print statement) — this module does not
    # run under Python 3 as written.
    def __init__(self):
        # Only dumps the opengm build configuration; no state is stored.
        print opengm.configuration
def filenamesFromDir(path, ending='.h5'):
    """Return the paths of all files in *path* whose names end with *ending*.

    Parameters
    ----------
    path : str
        Directory to scan (trailing separator no longer required).
    ending : str
        Filename suffix filter (default '.h5').
    """
    # os.path.join handles a missing trailing separator on *path*,
    # whereas the previous ``path + f`` concatenation silently produced
    # broken paths when *path* had no trailing '/'.
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(ending)]
def plotInfRes(v):
    # Plot an inference run's objective values against cumulative time.
    # ``v`` is expected to be an opengm timing visitor exposing
    # getValues() / getTimes().  Python 2 print statement below.
    val= v.getValues()
    t= v.getTimes()
    a=t.copy()
    # Per-step times are converted to a cumulative time axis.
    tt=numpy.cumsum(a)
    #tt-=tt[0]
    p=pylab.plot(tt,val)
    print "t0 tt0 tt-1 ",t[0],tt[0],tt[-1]
    # Drop local references to the (possibly large) timing arrays.
    tt=None
    t=None
    return p
def makePath(p):
    """Create directory *p* (including parents) unless it already exists."""
    if os.path.exists(p):
        return
    os.makedirs(p)
def makePathEnding(f):
    """Return *f* guaranteed to end with a '/' path separator."""
    return f if f.endswith("/") else f + "/"
def storeSingleResult(result, outFolder, dataSetName, solverName, gmName):
    """Create the output directory hierarchy outFolder/dataSetName/solverName/.

    Note: ``result`` and ``gmName`` are currently unused — only the
    directory structure is created.
    """
    solver_dir = makePathEnding(
        makePathEnding(makePathEnding(outFolder) + dataSetName) + solverName)
    makePath(solver_dir)
def runBenchmark(fNames,solvers,outFolder,dataSetName,plot=False):
    # Run every solver on every model file and collect timing results.
    #
    # fNames  : list of model filenames (or already-loaded gm objects)
    # solvers : list of (name, inferenceClass, parameter) triples
    # outFolder / dataSetName : used to build the on-disk result hierarchy
    # plot    : when True, plot value-vs-time curves per model
    #
    # NOTE(review): Python 2 print statements throughout; also ``names`` is
    # only bound inside the ``if plot:`` branch but printed unconditionally
    # after the loop — confirm behavior when plot=False.
    nFiles = len(fNames)
    nSolver = len(solvers)
    result = dict()
    for fNr,fName in enumerate(fNames):
        #if fNr!=1:
        #    continue
        print fNr+1,"/",nFiles,":",fName
        print "load gm"
        # Accept either a filename (load from HDF5) or a ready gm object.
        if isinstance(fName,str):
            print "from string"
            gm = opengm.loadGm(fName)
        else :
            print "from gm"
            gm = fName
        print gm
        if plot:
            pr=[]
            names=[]
            #fig, ax = plt.subplots()
        fileResult=dict()
        #fileResult[fn]=fName
        for sNr,solver in enumerate(solvers) :
            (sName,sClass,sParam)=solver
            print sName
            inf=sClass(gm=gm,parameter=sParam)
            # The timing visitor records values/times/bounds per iteration.
            tv=inf.timingVisitor(verbose=True,multiline=False,visitNth=1)
            inf.infer(tv)
            # store results
            solverResult=dict()
            solverResult['values'] = tv.getValues()
            solverResult['times'] = tv.getTimes()
            solverResult['bounds'] = tv.getBounds()
            solverResult['iterations'] = tv.getIterations()
            solverResult['name'] = sName
            solverResult['arg'] = inf.arg()
            solverResult['gmName'] = fName
            # write to file
            storeSingleResult(result=tv,outFolder=outFolder,dataSetName=dataSetName,solverName=sName,gmName=fName)
            # write into result dict
            fileResult[sName] = solverResult
            if plot:
                pr.append(plotInfRes(tv))
                print sName
                names.append(sName)
        result[fName]=fileResult
    print names
    if plot:
        plt.legend( names,loc= 5)
        #plt.legend(pr,names)
        plt.show()
    #return result
| mit |
JPFrancoia/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
# Candidate classifiers for the benchmark, selectable from the command
# line via -e/--estimators (keys are the accepted choices).
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(n_estimators=100,
                                            max_features="sqrt",
                                            min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(n_estimators=100,
                                        max_features="sqrt",
                                        min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--estimators', nargs="+", required=True,
                        choices=ESTIMATORS)
    args = vars(parser.parse_args())

    # Fetch the vectorized 20 newsgroups splits and coerce them to the
    # sparse formats each phase prefers (csc for fit, csr for predict).
    data_train = fetch_20newsgroups_vectorized(subset="train")
    data_test = fetch_20newsgroups_vectorized(subset="test")
    X_train = check_array(data_train.data, dtype=np.float32,
                          accept_sparse="csc")
    X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
    y_train = data_train.target
    y_test = data_test.target

    print("20 newsgroups")
    print("=============")
    print("X_train.shape = {0}".format(X_train.shape))
    print("X_train.format = {0}".format(X_train.format))
    print("X_train.dtype = {0}".format(X_train.dtype))
    # np.prod replaces the np.product alias, which was removed in NumPy 2.0.
    print("X_train density = {0}"
          "".format(X_train.nnz / np.prod(X_train.shape)))
    print("y_train {0}".format(y_train.shape))
    print("X_test {0}".format(X_test.shape))
    print("X_test.format = {0}".format(X_test.format))
    print("X_test.dtype = {0}".format(X_test.dtype))
    print("y_test {0}".format(y_test.shape))
    print()
    print("Classifier Training")
    print("===================")
    accuracy, train_time, test_time = {}, {}, {}
    for name in sorted(args["estimators"]):
        clf = ESTIMATORS[name]
        # Fix the random seed where the estimator supports it, so runs
        # are comparable; skip estimators without a random_state param.
        try:
            clf.set_params(random_state=0)
        except (TypeError, ValueError):
            pass

        print("Training %s ... " % name, end="")
        t0 = time()
        clf.fit(X_train, y_train)
        train_time[name] = time() - t0
        t0 = time()
        y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
        accuracy[name] = accuracy_score(y_test, y_pred)
        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print()
    print("%s %s %s %s" % ("Classifier  ", "train-time", "test-time",
                           "Accuracy"))
    print("-" * 44)
    # Report estimators from worst to best accuracy.
    for name in sorted(accuracy, key=accuracy.get):
        print("%s %s %s %s" % (name.ljust(16),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % accuracy[name]).center(10)))

    print()
| bsd-3-clause |
jseabold/statsmodels | statsmodels/emplike/tests/test_regression.py | 5 | 5787 | from numpy.testing import assert_almost_equal
import pytest
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import add_constant
from .results.el_results import RegressionResults
from statsmodels.datasets import stackloss
class GenRes(object):
    """
    Load data and create the class instances to be tested.
    """
    @classmethod
    def setup_class(cls):
        # Fit OLS on the stackloss dataset once per test class.
        # ``res1`` is the fitted model; ``res2`` holds precomputed
        # reference values from the el_results module.
        data = stackloss.load(as_pandas=False)
        data.exog = add_constant(data.exog)
        cls.res1 = OLS(data.endog, data.exog).fit()
        cls.res2 = RegressionResults()
@pytest.mark.slow
class TestRegressionPowell(GenRes):
    """
    All confidence intervals are tested by conducting a hypothesis
    tests at the confidence interval values.

    See Also
    --------
    test_descriptive.py, test_ci_skew
    """

    @pytest.mark.slow
    def test_hypothesis_beta0(self):
        # el_test returns (statistic, p-value, weights); the first two are
        # compared separately from the weights vector.
        beta0res = self.res1.el_test([-30], [0], return_weights=1,
                                     method='powell')
        assert_almost_equal(beta0res[:2], self.res2.test_beta0[:2], 4)
        assert_almost_equal(beta0res[2], self.res2.test_beta0[2], 4)

    @pytest.mark.slow
    def test_hypothesis_beta1(self):
        beta1res = self.res1.el_test([.5], [1], return_weights=1,
                                     method='powell')
        assert_almost_equal(beta1res[:2], self.res2.test_beta1[:2], 4)
        assert_almost_equal(beta1res[2], self.res2.test_beta1[2], 4)

    def test_hypothesis_beta2(self):
        beta2res = self.res1.el_test([1], [2], return_weights=1,
                                     method='powell')
        assert_almost_equal(beta2res[:2], self.res2.test_beta2[:2], 4)
        assert_almost_equal(beta2res[2], self.res2.test_beta2[2], 4)

    def test_hypothesis_beta3(self):
        beta3res = self.res1.el_test([0], [3], return_weights=1,
                                     method='powell')
        assert_almost_equal(beta3res[:2], self.res2.test_beta3[:2], 4)
        assert_almost_equal(beta3res[2], self.res2.test_beta3[2], 4)

    # Confidence interval results obtained through hypothesis testing in Matlab

    @pytest.mark.slow
    def test_ci_beta0(self):
        beta0ci = self.res1.conf_int_el(0, lower_bound=-52.9,
                                        upper_bound=-24.1, method='powell')
        assert_almost_equal(beta0ci, self.res2.test_ci_beta0, 3)
        # Slightly lower precision. CI was obtained from nm method.

    @pytest.mark.slow
    def test_ci_beta1(self):
        beta1ci = self.res1.conf_int_el(1, lower_bound=.418, upper_bound=.986,
                                        method='powell')
        assert_almost_equal(beta1ci, self.res2.test_ci_beta1, 4)

    @pytest.mark.slow
    def test_ci_beta2(self):
        beta2ci = self.res1.conf_int_el(2, lower_bound=.59,
                                        upper_bound=2.2, method='powell')
        assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 5)

    @pytest.mark.slow
    def test_ci_beta3(self):
        beta3ci = self.res1.conf_int_el(3, lower_bound=-.39, upper_bound=.01,
                                        method='powell')
        assert_almost_equal(beta3ci, self.res2.test_ci_beta3, 6)
class TestRegressionNM(GenRes):
    """
    All confidence intervals are tested by conducting a hypothesis
    tests at the confidence interval values.

    See Also
    --------
    test_descriptive.py, test_ci_skew
    """
    # Same tests as TestRegressionPowell, but with the Nelder-Mead
    # optimizer ('nm') instead of Powell.

    def test_hypothesis_beta0(self):
        beta0res = self.res1.el_test([-30], [0], return_weights=1,
                                     method='nm')
        assert_almost_equal(beta0res[:2], self.res2.test_beta0[:2], 4)
        assert_almost_equal(beta0res[2], self.res2.test_beta0[2], 4)

    def test_hypothesis_beta1(self):
        beta1res = self.res1.el_test([.5], [1], return_weights=1,
                                     method='nm')
        assert_almost_equal(beta1res[:2], self.res2.test_beta1[:2], 4)
        assert_almost_equal(beta1res[2], self.res2.test_beta1[2], 4)

    @pytest.mark.slow
    def test_hypothesis_beta2(self):
        beta2res = self.res1.el_test([1], [2], return_weights=1,
                                     method='nm')
        assert_almost_equal(beta2res[:2], self.res2.test_beta2[:2], 4)
        assert_almost_equal(beta2res[2], self.res2.test_beta2[2], 4)

    @pytest.mark.slow
    def test_hypothesis_beta3(self):
        beta3res = self.res1.el_test([0], [3], return_weights=1,
                                     method='nm')
        assert_almost_equal(beta3res[:2], self.res2.test_beta3[:2], 4)
        assert_almost_equal(beta3res[2], self.res2.test_beta3[2], 4)

    # Confidence interval results obtained through hyp testing in Matlab

    @pytest.mark.slow
    def test_ci_beta0(self):
        # All confidence intervals are tested by conducting a hypothesis
        # tests at the confidence interval values since el_test
        # is already tested against Matlab
        #
        # See Also
        # --------
        #
        # test_descriptive.py, test_ci_skew

        beta0ci = self.res1.conf_int_el(0, method='nm')
        assert_almost_equal(beta0ci, self.res2.test_ci_beta0, 6)

    @pytest.mark.slow
    def test_ci_beta1(self):
        beta1ci = self.res1.conf_int_el(1, method='nm')
        assert_almost_equal(beta1ci, self.res2.test_ci_beta1, 6)

    @pytest.mark.slow
    def test_ci_beta2(self):
        beta2ci = self.res1.conf_int_el(2, lower_bound=.59, upper_bound=2.2,
                                        method='nm')
        assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 6)

    @pytest.mark.slow
    def test_ci_beta3(self):
        beta3ci = self.res1.conf_int_el(3, method='nm')
        assert_almost_equal(beta3ci, self.res2.test_ci_beta3, 6)
| bsd-3-clause |
lewislone/mStocks | packets-analysis/lib/XlsxWriter-0.7.3/examples/pandas_chart.py | 9 | 1049 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a chart
# using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import pandas as pd
# Create a Pandas dataframe from some data.
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})

# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('pandas_chart.xlsx', engine='xlsxwriter')

# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')

# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']

# Create a chart object.
chart = workbook.add_chart({'type': 'column'})

# Configure the series of the chart from the dataframe data.
# B2:B8 covers the 7 data rows written above (row 1 holds the header).
chart.add_series({'values': '=Sheet1!$B$2:$B$8'})

# Insert the chart into the worksheet.
worksheet.insert_chart('D2', chart)

# Close the Pandas Excel writer and output the Excel file.
# NOTE(review): ExcelWriter.save() was deprecated in later pandas in
# favor of close() — confirm the pinned pandas version.
writer.save()
| mit |
vanatteveldt/semafor | src/main/python/semafor/framenet/pmi.py | 5 | 1525 | from itertools import chain, combinations, product
import codecs
import json
from math import log
import networkx as nx
import matplotlib as plt
from nltk import FreqDist
from semafor.framenet.frames import FrameHierarchy
THRESHOLD = 4
def draw_graph(graph):
    """Draw *graph* with a graphviz 'dot' layout and fixed node/edge styling.

    NOTE(review): ``nx.graphviz_layout`` requires pygraphviz and moved to
    ``nx.nx_agraph.graphviz_layout`` in modern NetworkX — confirm the
    pinned version before upgrading.
    """
    pos = nx.graphviz_layout(graph, prog='dot')
    nx.draw(graph, pos, node_color='#A0CBE2', edge_color='#BB0000', width=2, edge_cmap=plt.cm.Blues,
            with_labels=True)
def pmi(a, b):
    """Pointwise mutual information of frames *a* and *b*.

    Computes log P(a, b) - log P(a) - log P(b) from the module-level
    ``pairs`` and ``unigrams`` frequency distributions.
    """
    return (log(pairs[a, b]) - log(pairs.N())
            - log(unigrams[a]) - log(unigrams[b])
            + 2 * log(unigrams.N()))
# Build unigram/pair frame statistics over the training sentences and
# keep the PMI of every sufficiently frequent frame pair.
h = FrameHierarchy.load()

# training data contains a bad frame
valid_names = {f.name for f in h._frames.values()}

with codecs.open("../../../training/data/naacl2012/cv.train.sentences.json", encoding="utf8") as train_file:
    train = [json.loads(line) for line in train_file]
# For each sentence: (start offset, frame name) pairs, later sorted so
# frames appear in textual order.
unsorted_frames = ([(f['target']['spans'][0]['start'], f['target']['name'])
                    for f in s['frames']] for s in train)
frames = [[name for start, name in sorted(s) if name in valid_names]
          for s in unsorted_frames]
del unsorted_frames

unigrams = FreqDist(chain(*frames))
# Unordered co-occurrence pairs within a sentence, canonicalized by sorting.
pairs = FreqDist(chain(*[[tuple(sorted(b)) for b in combinations(f, 2)] for f in frames]))

# Keep PMI only for pairs whose members each occur at least THRESHOLD times.
# NOTE(review): FreqDist is built here from float PMI scores, not counts —
# it is used purely as a score container.
pmis = FreqDist({
    (a, b): pmi(a, b)
    for a, b in pairs.keys()
    if unigrams[a] >= THRESHOLD and unigrams[b] >= THRESHOLD
})

unigrams_with_ancestors = FreqDist(unigrams)
for u in unigrams:
for a in h.ancestors(h._frames[u]):
unigrams_with_ancestors.inc(a.name) | gpl-3.0 |
q1ang/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
    # Test without calculating D
    silhouette_metric = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(silhouette, silhouette_metric)
    # Test with sampling: both the precomputed and the direct path must
    # agree when given the same subsample (fixed random_state).
    silhouette = silhouette_score(D, y, metric='precomputed',
                                  sample_size=int(X.shape[0] / 2),
                                  random_state=0)
    silhouette_metric = silhouette_score(X, y, metric='euclidean',
                                         sample_size=int(X.shape[0] / 2),
                                         random_state=0)
    assert(silhouette > 0)
    assert(silhouette_metric > 0)
    assert_almost_equal(silhouette_metric, silhouette)
    # Test with sparse X: the sparse input path must also yield a
    # positive score.
    X_sparse = csr_matrix(X)
    D = pairwise_distances(X_sparse, metric='euclidean')
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
def test_no_nan():
    # Assert Silhouette Coefficient != nan when there is 1 sample in a class.
    # This tests for the condition that caused issue 960.
    # Note that there is only one sample in cluster 0. This used to cause the
    # silhouette_score to return nan (see bug #960).
    labels = np.array([1, 0, 1, 1, 1])
    # The distance matrix doesn't actually matter.
    # A fixed seed keeps the (irrelevant) distances deterministic.
    D = np.random.RandomState(0).rand(len(labels), len(labels))
    silhouette = silhouette_score(D, labels, metric='precomputed')
    assert_false(np.isnan(silhouette))
def test_correct_labelsize():
    # Assert silhouette_score rejects degenerate clusterings:
    # it requires 1 < n_labels < n_samples.
    dataset = datasets.load_iris()
    X = dataset.data

    # Raw strings for the regex patterns: the previous non-raw literals
    # relied on invalid escape sequences ('\.', '\(' ), which emit
    # DeprecationWarnings and will eventually be syntax errors.

    # n_labels = n_samples
    y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)

    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
| bsd-3-clause |
filipkilibarda/Ants-on-a-Polygon | simulation.py | 1 | 3153 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import pi,cos,sin,sqrt
import numpy as np
import ants
def calcAnalyticalSolution():
    """Closed-form meeting time for the ants-on-a-polygon pursuit problem.

    Uses the module-level constants NUMBER_OF_ANTS, SPEED and
    INITIAL_DISTANCE_ORIGIN.
    """
    polygon = ants.Ngon(NUMBER_OF_ANTS)
    interiorAngle = polygon.getInteriorAngle()
    # Initial side length of the regular polygon formed by the ants.
    sideLength = 2*INITIAL_DISTANCE_ORIGIN*sin(2*pi/NUMBER_OF_ANTS/2)
    return sideLength/(SPEED*(1-sin(interiorAngle-pi/2)))
NUMBER_OF_ANTS = 16  # number of ants, i.e. vertices of the regular polygon
SPEED = 1  # ant speed (distance units per time unit)
INITIAL_DISTANCE_ORIGIN = 1  # starting distance of each ant from the origin
if __name__ == "__main__":
    # Run the whole simulation up-front; the animation below only replays
    # the recorded frames.
    kwargs = {
        "antGroup": ants.AntGroup(NUMBER_OF_ANTS),
        "maxFrames": 2**20,
        "frameReductionFactor": 2**7,
        "alpha": 1/1000,
    }
    simulationManager = ants.SimulationManager(**kwargs)
    simulationManager.runSimulation()

    def init():
        """initialize animation"""
        # Display the closed-form expected meeting time alongside the
        # live timer.
        analy_text.set_text('Expected time = %.10f' %
                            calcAnalyticalSolution())
        return (time_text,)

    def animate(i):
        """perform animation step"""
        # Clamp the frame index so the trailing "pause" frames keep
        # showing the final state.
        # NOTE(review): clamping to exactly getNumFramesUsedAfterReduction()
        # looks like a potential off-by-one — confirm the manager accepts
        # that index.
        if i >= simulationManager.getNumFramesUsedAfterReduction():
            i = simulationManager.getNumFramesUsedAfterReduction()
        dots.set_data(
            simulationManager.getIthXPositions(i),
            simulationManager.getIthYPositions(i)
        )
        time_text.set_text('Elapsed time = %.10f' %
                           simulationManager.getIthTimeElapsed(i))
        distance_text.set_text('Distance between ants = %.10f' %
                               simulationManager.getIthDistanceBetweenAnts(i))
        return (dots, time_text, distance_text,)

    ###########################################################
    # Setup plot
    ###########################################################

    # set up figure and animation
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
                         xlim=(-INITIAL_DISTANCE_ORIGIN,
                               INITIAL_DISTANCE_ORIGIN),
                         ylim=(-INITIAL_DISTANCE_ORIGIN,
                               INITIAL_DISTANCE_ORIGIN))

    # dots to go on the plot
    dots, = ax.plot([], 'bo', ms=.3)

    # declare the text that indicates elapsed time
    time_text = ax.text(0.02, 0.90, '', transform=ax.transAxes)
    # text that indicates the analytical solution
    analy_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
    # text that indicates the distance between each ant
    distance_text = ax.text(0.02, 0.85, '', transform=ax.transAxes)

    """
    Interval is the length of time that the animation should pause
    in between each frame. The amount of time it takes to calculate
    each frame depends on how complicated the calcualation is, but there's
    this extra `interval` length of time where the animation pauses
    before calculating the next frame.
    """
    interval = 20

    # number of frame steps to rest on the last frame
    pause = 100

    ani = animation.FuncAnimation(fig, animate,
                                  frames=simulationManager.getNumFramesUsedAfterReduction()+pause,
                                  interval=interval,
                                  blit=True,
                                  init_func=init,
                                  repeat=False)

    ani.save('imgs/ani.gif', writer='imagemagick', fps=50)
    # plt.show()
| mit |
galactics/beyond | tests/propagators/test_keplernum.py | 2 | 15660 | import numpy as np
from contextlib import contextmanager
from pytest import fixture, raises, mark
from unittest.mock import patch
import beyond.io.ccsds as ccsds
from beyond.dates import Date, timedelta
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum, SOIPropagator
from beyond.env.solarsystem import get_body
from beyond.propagators.listeners import LightListener, NodeListener, find_event, ApsideListener
from beyond.orbits.man import ImpulsiveMan, KeplerianImpulsiveMan, ContinuousMan, KeplerianContinuousMan
import beyond.env.jpl as jpl
@fixture
def orbit_kepler(iss_tle):
    # ISS orbit built from its TLE, with a numerical Kepler propagator
    # attached (60 s integration step, Earth as the only attracting body).
    orbit = iss_tle.orbit()

    orbit.propagator = KeplerNum(
        timedelta(seconds=60),
        bodies=get_body('Earth')
    )

    return orbit
@fixture
def molniya_kepler(molniya_tle):
    """Molniya orbit fitted with a KeplerNum propagator (120 s step, Earth only)."""
    molniya = molniya_tle.orbit()
    molniya.propagator = KeplerNum(
        timedelta(seconds=120),
        bodies=get_body('Earth')
    )
    return molniya
@contextmanager
def mock_step(orb):
    """Spy on KeplerNum._make_step so tests can count integrator steps.

    ``wraps=`` keeps the real implementation running while the mock
    records the number of calls.
    """
    with patch('beyond.propagators.keplernum.KeplerNum._make_step', wraps=orb.propagator._make_step) as mock:
        yield mock
def count_steps(td, step, inclusive=True):
    """Number of fixed-size steps needed to cover the duration *td*.

    Args:
        td (timedelta): time span to travel (its sign is ignored)
        step (timedelta): size of a single integrator step
        inclusive (bool): when True, one extra step is counted
    Return:
        int
    """
    extra = 1 if inclusive else 0
    return abs(td) // step + extra
def plot_delta_a(dates, altitude, eccentricity=None):
    """Debug helper: plot altitude (and optionally eccentricity) over time.

    Only called interactively from commented-out lines in the tests, so
    matplotlib is imported lazily to keep it out of the test dependencies.
    """
    import matplotlib.pyplot as plt

    fig = plt.figure()
    g1 = fig.add_subplot(111)
    p1, = g1.plot(dates, altitude, label="altitude", color="orange")
    g1.set_ylabel("Altitude (m)")
    g1.yaxis.label.set_color(p1.get_color())
    g1.grid(ls=":")

    # NOTE(review): truthiness test — an empty list is treated like None here
    if eccentricity:
        # second y-axis sharing the same time axis
        g2 = g1.twinx()
        p2, = g2.plot(dates, eccentricity, label="eccentricity")
        g2.set_ylabel("Eccentricity")
        g2.yaxis.label.set_color(p2.get_color())
        g2.set_yscale('log')

    plt.tight_layout()
    plt.show()
def test_propagate_rk4(orbit_kepler):
    """Forward and backward propagation with the RK4 integrator.

    The expected state vector is a frozen reference (characterization test).
    """
    orbit_kepler.propagator.method = KeplerNum.RK4

    assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)

    # simple propagation with a Date object
    orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))

    assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
    assert orb2.propagator.orbit is None  # brand new propagator

    # simple propagation with a timedelta object
    orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))

    # Check if the propagator.orbit is initialized for orb2
    # and not yet initialized for orb3
    assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
    assert orb2.propagator.orbit is not None
    assert orb3.propagator.orbit is None

    # frozen reference cartesian state (x, y, z in m; vx, vy, vz in m/s)
    assert np.allclose(
        orb3,
        [-2267347.5906591383, 3865612.1569156954, -5093932.5567979375, -5238.634675262262, -5326.282920539333, -1708.6895889357945]
    )

    # simple propagation with a negative step
    orb4 = orb3.propagate(timedelta(minutes=-15))
    assert orb4.date == orb3.date - timedelta(minutes=15)
def test_micro_step(orbit_kepler):
    """Propagation over less than one propagator step still integrates.

    The 7-call count is a frozen characterization of the bracketing scheme.
    """
    with mock_step(orbit_kepler) as mock:
        # Propagation with micro-step (< to the Kepler propagator step size)
        orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(seconds=20))
        assert orb2.date == orbit_kepler.date + timedelta(seconds=20)
        assert mock.call_count == 7

    with mock_step(orbit_kepler) as mock:
        # negative micro-step
        orb2 = orbit_kepler.propagate(orbit_kepler.date - timedelta(seconds=20))
        assert orb2.date == orbit_kepler.date - timedelta(seconds=20)
        assert mock.call_count == 7
def test_propagate_euler(orbit_kepler):
    """Same scenario as the RK4 test, but with the (less accurate) Euler
    integrator; note the reference state differs markedly from RK4/DOPRI."""
    orbit_kepler.propagator.method = KeplerNum.EULER

    assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)

    orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))

    assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
    assert orb2.propagator.orbit is None  # brand new propagator

    orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))

    assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
    assert orb2.propagator.orbit is not None
    assert orb3.propagator.orbit is None

    # frozen reference state for the Euler integrator
    assert np.allclose(
        np.array(orb3),
        [-880124.9759610161, -10453560.873778934, 6457874.859314914, 4109.877000752121, 1881.4035807734163, 2961.5286009903316]
    )
def test_propagate_dopri(orbit_kepler):
    """Same scenario as the RK4 test with the DOPRI 5(4) integrator; the
    reference state agrees with RK4 to within a few tens of metres."""
    orbit_kepler.propagator.method = KeplerNum.DOPRI54

    assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)

    orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))

    assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
    assert orb2.propagator.orbit is None  # brand new propagator

    orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))

    assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
    assert orb2.propagator.orbit is not None  # This propagator has been used
    assert orb3.propagator.orbit is None  # This one not

    # frozen reference state for the DOPRI54 integrator
    assert np.allclose(
        np.array(orb3),
        [-2267319.8725340427, 3865646.423538732, -5093927.810461366, -5238.647479926973, -5326.249640066392, -1708.7264386468821]
    )
def test_iter(orbit_kepler):
    """iter() yields one point per propagator step over the requested span,
    and a second pass produces equal but distinct Orbit objects."""
    data = [p for p in orbit_kepler.iter(stop=timedelta(minutes=120))]
    assert len(data) == 121
    assert min(data, key=lambda x: x.date).date == orbit_kepler.date
    assert max(data, key=lambda x: x.date).date == orbit_kepler.date + timedelta(minutes=120)

    for p in data:
        # Check that no created Orbit object has an initialized propagator
        # i.e. that the propagation is done only by the propagator of orbit_kepler
        # This happened during development when dealing with listeners and should not happen
        # again due to the use of Ephem inside KeplerNum
        assert p.propagator.orbit is None

    data2 = [p for p in orbit_kepler.iter(stop=timedelta(minutes=120))]
    assert data[0].date == data2[0].date
    assert all(data[0] == data2[0])
    assert data[0] is not data2[0]

# TODO Test retropolation then extrapolation
# same but with step interpolation
def test_iter_on_dates(orbit_kepler):
    """Ephemeris generation on an explicit, free-step list of dates."""
    # Generate a free step ephemeris
    start = orbit_kepler.date
    stop = timedelta(hours=3)
    step = timedelta(seconds=10)

    drange = Date.range(start, stop, step, inclusive=True)
    ephem = orbit_kepler.ephem(dates=drange)

    assert ephem.start == start
    assert ephem.stop == start + stop
    assert ephem[1].date - ephem[0].date == step

    for p in ephem:
        # same invariant as test_iter: only the source propagator works
        assert p.propagator.orbit is None
def test_duty_cycle(orbit_kepler):
    """The integrator only computes the steps needed to reach the target,
    both backward in time and over an [start, stop] iteration window."""
    with mock_step(orbit_kepler) as mock:
        date = Date(2018, 5, 4, 15)
        orbit_kepler.propagate(date)

        assert mock.call_count == count_steps(orbit_kepler.date - date, orbit_kepler.propagator.step)
        assert mock.call_count == 100

    with mock_step(orbit_kepler) as mock:
        # retro-propagation: same accounting applies for negative spans
        date = orbit_kepler.date - timedelta(seconds=652)
        orbit_kepler.propagate(date)

        assert mock.call_count == count_steps(orbit_kepler.date - date, orbit_kepler.propagator.step)
        assert mock.call_count == 11

    with mock_step(orbit_kepler) as mock:
        start = Date(2018, 5, 4, 13)
        stop = start + timedelta(minutes=90)

        data = []
        for p in orbit_kepler.iter(start=start, stop=stop):
            data.append(p)

        assert len(data) == 91
        assert data[0].date == start
        assert data[-1].date == stop
        # steps to rewind to `start`, plus steps to cover [start, stop)
        assert mock.call_count == (
            count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
            + count_steps(stop - start, orbit_kepler.propagator.step, False)
        )
        # assert mock.call_count == 125
def test_listener(orbit_kepler):
    """Event listeners add the detected events (umbra crossings, apsides)
    to the iteration without changing the number of integrator steps."""
    with mock_step(orbit_kepler) as mock:
        start = Date(2018, 5, 4, 13)
        stop = start + timedelta(minutes=90)

        data = []
        for p in orbit_kepler.iter(start=start, stop=stop, listeners=LightListener()):
            data.append(p)

        # 91 regular points + 2 event points
        assert len(data) == 93
        assert mock.call_count == (
            count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
            + count_steps(stop - start, orbit_kepler.propagator.step, False)
        )
        # assert mock.call_count == 111

        events = [x for x in data if x.event]
        assert len(events) == 2
        assert events[0].date == Date(2018, 5, 4, 13, 8, 38, 869128)
        assert events[0].event.info == "Umbra exit"
        assert events[1].date == Date(2018, 5, 4, 14, 5, 21, 256924)
        assert events[1].event.info == "Umbra entry"

    with mock_step(orbit_kepler) as mock:
        start = Date(2018, 5, 4, 13)
        stop = start + timedelta(minutes=90)

        data = []
        for p in orbit_kepler.iter(start=start, stop=stop, listeners=ApsideListener()):
            data.append(p)

        assert len(data) == 93
        assert mock.call_count == (
            count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
            + count_steps(stop - start, orbit_kepler.propagator.step, False)
        )
        # assert mock.call_count == 125

        events = [x for x in data if x.event]
        assert len(events) == 2
        assert str(events[0].date) == "2018-05-04T13:08:30.765143 UTC"
        assert events[0].event.info == "Periapsis"
        assert str(events[1].date) == "2018-05-04T13:54:50.178229 UTC"
        assert events[1].event.info == "Apoapsis"
def test_man_impulsive(molniya_kepler):
    """Circularisation of a Molniya orbit with an impulsive burn at apogee."""
    # Test of a circularisation of a molniya orbit
    # At apogee, this is roughly 1400 m/s

    with raises(ValueError):
        # a 4-component delta-v vector is rejected
        ImpulsiveMan(Date(2018, 9, 20, 13, 48, 21, 763091), (28, 0, 0, 0))

    apo = find_event(molniya_kepler.iter(stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
    man = ImpulsiveMan(apo.date, (1427., 0, 0), frame="TNW")

    # Check on the sensitivity of the find_event function
    apo2 = find_event(molniya_kepler.iter(start=molniya_kepler.date + timedelta(seconds=243, minutes=5), stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
    assert abs(apo.date - apo2.date) < timedelta(seconds=1)

    molniya_kepler.maneuvers = man

    altitude = []
    eccentricity = []
    dates = []
    for p in molniya_kepler.iter(stop=timedelta(hours=36)):
        altitude.append(p.copy(form='spherical').r - p.frame.center.body.r)
        eccentricity.append(p.copy(form="keplerian").e)
        dates.append(p.date.datetime)

    # plot_delta_a(dates, altitude, eccentricity)

    # retrieve the index of the first point after the maneuver
    man_idx = (np.array(dates) > man.date.datetime).argmax()

    alt_before = np.mean(altitude[:man_idx])
    alt_after = np.mean(altitude[man_idx:])

    ecc_before = np.mean(eccentricity[:man_idx])
    ecc_after = np.mean(eccentricity[man_idx:])

    assert abs(ecc_before - 6.47e-1) < 2e-4
    assert abs(ecc_after - 3e-3) < 2e-4
    # assert abs(ecc_after - 6.57e-4) < 1e-6

    assert str(man.date) == "2018-05-03T16:29:23.246451 UTC"

    # 8'000 km increment in altitude
    assert 8000000 < alt_after - alt_before < 8200000
def test_man_delta_a(molniya_kepler):
    """Keplerian impulsive maneuver expressed as a semi-major-axis change."""
    apo = find_event(molniya_kepler.iter(stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
    man1 = KeplerianImpulsiveMan(apo.date, delta_a=5900000)
    molniya_kepler.maneuvers = man1

    altitude = []
    dates = []
    for p in molniya_kepler.iter(stop=timedelta(hours=26)):
        altitude.append(p.copy(form='spherical').r - p.frame.center.body.r)
        dates.append(p.date.datetime)

    # plot_delta_a(dates, altitude)

    # index of the first point after the maneuver
    man_idx = (np.array(dates) > man1.date.datetime).argmax()
    before = np.mean(altitude[:man_idx])
    after = np.mean(altitude[man_idx:])

    # delta-v computed internally from delta_a (m/s)
    assert int(np.linalg.norm(man1._dv)) == 1477
    assert 9100000 < after - before < 9200000
def test_man_delta_i(orbit_kepler):
    """Plane-change maneuver of 5 degrees applied at the ascending node."""
    asc = find_event(orbit_kepler.iter(stop=timedelta(minutes=200), listeners=NodeListener()), "Asc Node")

    man = KeplerianImpulsiveMan(asc.date, delta_angle=np.radians(5))
    orbit_kepler.maneuvers = man

    inclination, dates = [], []
    for p in orbit_kepler.iter(stop=timedelta(minutes=100)):
        inclination.append(p.copy(form="keplerian").i)
        dates.append(p.date.datetime)

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(dates, np.degrees(inclination))
    # plt.show()

    # average inclination well before vs. well after the burn
    before = np.degrees(np.mean(inclination[:30]))
    after = np.degrees(np.mean(inclination[-30:]))

    assert 4.99 < after - before <= 5.01
@mark.parametrize("method", ["dv", "accel"])
def test_man_continuous(method, molniya_kepler):
    """A 10-minute continuous burn, specified either as a total delta-v or
    as a constant acceleration, raises the orbit by the same amount."""
    duration = timedelta(minutes=10)

    apo = find_event(molniya_kepler.iter(stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)

    if method == "dv":
        man1 = ContinuousMan(apo.date, duration, dv=[1427, 0, 0], frame="TNW", date_pos="median")
    else:
        # 1427 m/s spread over 600 s ~= 2.37834 m/s^2
        man1 = ContinuousMan(apo.date, duration, accel=[2.37834, 0, 0], frame="TNW", date_pos="median")

    molniya_kepler.maneuvers = man1

    altitude = []
    eccentricity = []
    dates = []
    for p in molniya_kepler.iter(stop=timedelta(hours=26)):
        altitude.append(p.copy(form='spherical').r - p.frame.center.body.r)
        eccentricity.append(p.copy(form="keplerian").e)
        dates.append(p.date.datetime)

    # plot_delta_a(dates, altitude, eccentricity)

    # indices bracketing the burn interval
    man_idx_min = (np.array(dates) > man1.start.datetime).argmax()
    man_idx_max = (np.array(dates) > man1.stop.datetime).argmax()

    before = np.mean(altitude[:man_idx_min])
    after = np.mean(altitude[man_idx_max:])

    assert 8100000 < after - before < 8200000
@mark.jpl
def test_soi(jplfiles):
    """Earth-escape trajectory with the SOIPropagator: the frame must switch
    from EME2000 to Sun once the sphere of influence is left."""
    opm = ccsds.loads("""CCSDS_OPM_VERS = 2.0
CREATION_DATE = 2019-02-22T23:22:31
ORIGINATOR = N/A
META_START
OBJECT_NAME = N/A
OBJECT_ID = N/A
CENTER_NAME = EARTH
REF_FRAME = EME2000
TIME_SYSTEM = UTC
META_STOP
COMMENT State Vector
EPOCH = 2018-05-02T00:00:00.000000
X = 6678.000000 [km]
Y = 0.000000 [km]
Z = 0.000000 [km]
X_DOT = 0.000000 [km/s]
Y_DOT = 7.088481 [km/s]
Z_DOT = 3.072802 [km/s]
COMMENT Keplerian elements
SEMI_MAJOR_AXIS = 6678.000000 [km]
ECCENTRICITY = 0.000000
INCLINATION = 23.436363 [deg]
RA_OF_ASC_NODE = 0.000000 [deg]
ARG_OF_PERICENTER = 0.000000 [deg]
TRUE_ANOMALY = 0.000000 [deg]
COMMENT Escaping Earth
MAN_EPOCH_IGNITION = 2018-05-02T00:39:03.955092
MAN_DURATION = 0.000 [s]
MAN_DELTA_MASS = 0.000 [kg]
MAN_REF_FRAME = TNW
MAN_DV_1 = 3.456791 [km/s]
MAN_DV_2 = 0.000000 [km/s]
MAN_DV_3 = 0.000000 [km/s]
""")

    # fine step while inside a planetary SOI, coarse step in heliocentric cruise
    planetary_step = timedelta(seconds=180)
    solar_step = timedelta(hours=12)

    jpl.create_frames()
    central = jpl.get_body('Sun')
    planets = jpl.get_body('Earth')

    opm = opm.as_orbit(SOIPropagator(solar_step, planetary_step, central, planets))

    frames = set()
    for orb in opm.iter(stop=timedelta(5)):
        frames.add(orb.frame.name)

    # both frames must have been visited, and no other
    assert not frames.symmetric_difference(['Sun', 'EME2000'])

    # Check if the last point is out of Earth sphere of influence
    assert orb.copy(frame='EME2000', form="spherical").r > SOIPropagator.SOI['Earth'].radius
| mit |
trankmichael/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# NOTE(review): sklearn.cross_validation is the pre-0.18 module name; in
# modern scikit-learn this import lives in sklearn.model_selection.
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier

# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Binarize the output (one indicator column per class)
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=random_state)

# Run classifier (one binary SVC per class)
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
                                                        y_score[:, i])
    average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])

# Compute micro-averaged precision-recall curve and its area, treating
# every element of the label-indicator matrix as a binary prediction
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
    y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
                                                     average="micro")

# Plot Precision-Recall curve for class 0
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()

# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
         label='micro-average Precision-recall curve (area = {0:0.2f})'
               ''.format(average_precision["micro"]))
for i in range(n_classes):
    plt.plot(recall[i], precision[i],
             label='Precision-recall curve of class {0} (area = {1:0.2f})'
                   ''.format(i, average_precision[i]))

plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/path.py | 69 | 20263 | """
Contains a class for managing paths (polylines).
"""
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib._path import point_in_path, get_path_extents, \
point_in_path_collection, get_path_collection_extents, \
path_in_path, path_intersects_path, convert_path_to_polygons
from matplotlib.cbook import simple_linear_interpolation
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices as well as three codes ``CURVE3``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
to get the vertex/code pairs. This is important, since many
:class:`Path` objects, as an optimization, do not store a *codes*
at all, but have a default one provided for them by
:meth:`iter_segments`.
Note also that the vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 5 # 1 vertex
NUM_VERTICES = [1, 1, 1, 2, 3, 1]
code_type = np.uint8
    def __init__(self, vertices, codes=None):
        """
        Create a new path with the given vertices and codes.

        *vertices* is an Nx2 numpy float array, masked array or Python
        sequence.

        *codes* is an N-length numpy array or Python sequence of type
        :attr:`matplotlib.path.Path.code_type`.

        These two arrays must have the same length in the first
        dimension.

        If *codes* is None, *vertices* will be treated as a series of
        line segments.

        If *vertices* contains masked values, they will be converted
        to NaNs which are then handled correctly by the Agg
        PathIterator and other consumers of path data, such as
        :meth:`iter_segments`.
        """
        if ma.isMaskedArray(vertices):
            # masked points become NaNs, handled downstream as path breaks
            vertices = vertices.astype(np.float_).filled(np.nan)
        else:
            vertices = np.asarray(vertices, np.float_)

        if codes is not None:
            codes = np.asarray(codes, self.code_type)
            assert codes.ndim == 1
            assert len(codes) == len(vertices)

        assert vertices.ndim == 2
        assert vertices.shape[1] == 2

        # simplification only applies to pure polylines of >= 128 points
        # (no curve codes); see iter_segments
        self.should_simplify = (len(vertices) >= 128 and
                                (codes is None or np.all(codes <= Path.LINETO)))
        # cached so iter_segments can skip the NaN-handling branch
        self.has_nonfinite = not np.isfinite(vertices).all()
        self.codes = codes
        self.vertices = vertices
#@staticmethod
def make_compound_path(*args):
"""
(staticmethod) Make a compound path from a list of Path
objects. Only polygons (not curves) are supported.
"""
for p in args:
assert p.codes is None
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = Path.LINETO * np.ones(total_length)
i = 0
for length in lengths:
codes[i] = Path.MOVETO
i += length
return Path(vertices, codes)
make_compound_path = staticmethod(make_compound_path)
    def __repr__(self):
        """Return ``Path(vertices, codes)`` with the full array contents."""
        return "Path(%s, %s)" % (self.vertices, self.codes)
    def __len__(self):
        """Number of vertices in the path."""
        return len(self.vertices)
    def iter_segments(self, simplify=None):
        """
        Iterates over all of the curve segments in the path.  Each
        iteration returns a 2-tuple (*vertices*, *code*), where
        *vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
        one of the :class:`Path` codes.

        If *simplify* is provided, it must be a tuple (*width*,
        *height*) defining the size of the figure, in native units
        (e.g. pixels or points).  Simplification implies both removing
        adjacent line segments that are very close to parallel, and
        removing line segments outside of the figure.  The path will
        be simplified *only* if :attr:`should_simplify` is True, which
        is determined in the constructor by this criteria:

           - No curves
           - More than 128 vertices
        """
        vertices = self.vertices
        if not len(vertices):
            return

        codes = self.codes
        len_vertices = len(vertices)

        # bind to locals: these are looked up once instead of per iteration
        isfinite = np.isfinite
        NUM_VERTICES = self.NUM_VERTICES
        MOVETO = self.MOVETO
        LINETO = self.LINETO
        CLOSEPOLY = self.CLOSEPOLY
        STOP = self.STOP

        if simplify is not None and self.should_simplify:
            # clip/simplify first, then emit each resulting polygon as a
            # MOVETO followed by LINETOs
            polygons = self.to_polygons(None, *simplify)
            for vertices in polygons:
                yield vertices[0], MOVETO
                for v in vertices[1:]:
                    yield v, LINETO
        elif codes is None:
            if self.has_nonfinite:
                # a NaN vertex breaks the polyline: the next finite vertex
                # starts a new sub-path
                next_code = MOVETO
                for v in vertices:
                    if np.isfinite(v).all():
                        yield v, next_code
                        next_code = LINETO
                    else:
                        next_code = MOVETO
            else:
                yield vertices[0], MOVETO
                for v in vertices[1:]:
                    yield v, LINETO
        else:
            i = 0
            was_nan = False
            while i < len_vertices:
                code = codes[i]
                if code == CLOSEPOLY:
                    yield [], code
                    i += 1
                elif code == STOP:
                    return
                else:
                    num_vertices = NUM_VERTICES[int(code)]
                    curr_vertices = vertices[i:i+num_vertices].flatten()
                    if not isfinite(curr_vertices).all():
                        was_nan = True
                    elif was_nan:
                        # recover from a NaN run: restart the sub-path at
                        # the end point of this segment
                        yield curr_vertices[-2:], MOVETO
                        was_nan = False
                    else:
                        yield curr_vertices, code
                    i += num_vertices
    def transformed(self, transform):
        """
        Return a transformed copy of the path.

        .. seealso::

            :class:`matplotlib.transforms.TransformedPath`:
                A specialized path class that will cache the
                transformed result and automatically update when the
                transform changes.
        """
        # vertices are transformed copies; the codes array is shared
        return Path(transform.transform(self.vertices), self.codes)
def contains_point(self, point, transform=None):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return point_in_path(point[0], point[1], self, transform)
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return path_in_path(self, None, path, transform)
    def get_extents(self, transform=None):
        """
        Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
        path.

        Unlike computing the extents on the *vertices* alone, this
        algorithm will take into account the curves and deal with
        control points appropriately.
        """
        # NOTE(review): Python-2-style implicit relative import of
        # matplotlib.transforms; deferred to avoid a circular import.
        from transforms import Bbox
        if transform is not None:
            transform = transform.frozen()
        return Bbox(get_path_extents(self, transform))
    def intersects_path(self, other, filled=True):
        """
        Returns *True* if this path intersects another given path.

        *filled*, when True, treats the paths as if they were filled.
        That is, if one path completely encloses the other,
        :meth:`intersects_path` will return True.
        """
        # thin wrapper over the C extension routine
        return path_intersects_path(self, other, filled)
    def intersects_bbox(self, bbox, filled=True):
        """
        Returns *True* if this path intersects a given
        :class:`~matplotlib.transforms.Bbox`.

        *filled*, when True, treats the path as if it was filled.
        That is, if one path completely encloses the other,
        :meth:`intersects_path` will return True.
        """
        # NOTE(review): Python-2-style implicit relative import (see
        # get_extents); deferred to avoid a circular import.
        from transforms import BboxTransformTo
        # map the unit rectangle onto bbox and reuse the path/path test
        rectangle = self.unit_rectangle().transformed(
            BboxTransformTo(bbox))
        result = self.intersects_path(rectangle, filled)
        return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
    def to_polygons(self, transform=None, width=0, height=0):
        """
        Convert this path to a list of polygons.  Each polygon is an
        Nx2 array of vertices.  In other words, each polygon has no
        ``MOVETO`` instructions or curves.  This is useful for
        displaying in backends that do not support compound paths or
        Bezier curves, such as GDK.

        If *width* and *height* are both non-zero then the lines will
        be simplified so that vertices outside of (0, 0), (width,
        height) will be clipped.
        """
        if len(self.vertices) == 0:
            return []

        if transform is not None:
            transform = transform.frozen()

        # fast path: a single pure polyline with no clipping requested can
        # be returned directly without going through the extension code
        if self.codes is None and (width == 0 or height == 0):
            if transform is None:
                return [self.vertices]
            else:
                return [transform.transform(self.vertices)]

        # Deal with the case where there are curves and/or multiple
        # subpaths (using extension code)
        return convert_path_to_polygons(self, transform, width, height)
    # lazily-built singleton returned by unit_rectangle()
    _unit_rectangle = None

    #@classmethod
    def unit_rectangle(cls):
        """
        (staticmethod) Returns a :class:`Path` of the unit rectangle
        from (0, 0) to (1, 1).
        """
        if cls._unit_rectangle is None:
            # closed polyline: the last vertex repeats the first
            cls._unit_rectangle = \
                Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
        return cls._unit_rectangle
    unit_rectangle = classmethod(unit_rectangle)
    # weak-value cache keyed on numVertices; entries disappear once no
    # caller holds a reference to the cached Path
    _unit_regular_polygons = WeakValueDictionary()

    #@classmethod
    def unit_regular_polygon(cls, numVertices):
        """
        (staticmethod) Returns a :class:`Path` for a unit regular
        polygon with the given *numVertices* and radius of 1.0,
        centered at (0, 0).
        """
        # only small vertex counts are worth caching
        if numVertices <= 16:
            path = cls._unit_regular_polygons.get(numVertices)
        else:
            path = None
        if path is None:
            theta = (2*np.pi/numVertices *
                     np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
            # This initial rotation is to make sure the polygon always
            # "points-up"
            theta += np.pi / 2.0
            verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
            path = Path(verts)
            cls._unit_regular_polygons[numVertices] = path
        return path
    unit_regular_polygon = classmethod(unit_regular_polygon)
_unit_regular_stars = WeakValueDictionary()
#@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
(staticmethod) Returns a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
path = Path(verts)
cls._unit_regular_polygons[(numVertices, innerCircle)] = path
return path
unit_regular_star = classmethod(unit_regular_star)
    #@classmethod
    def unit_regular_asterisk(cls, numVertices):
        """
        (staticmethod) Returns a :class:`Path` for a unit regular
        asterisk with the given numVertices and radius of 1.0,
        centered at (0, 0).
        """
        # an asterisk is a star whose inner circle has collapsed to the
        # origin (innerCircle == 0.0)
        return cls.unit_regular_star(numVertices, 0.0)
    unit_regular_asterisk = classmethod(unit_regular_asterisk)
    # lazily-built singleton returned by unit_circle()
    _unit_circle = None

    #@classmethod
    def unit_circle(cls):
        """
        (staticmethod) Returns a :class:`Path` of the unit circle.
        The circle is approximated using cubic Bezier curves.  This
        uses 8 splines around the circle using the approach presented
        here:

          Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
          Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
        """
        if cls._unit_circle is None:
            # MAGIC is the Bezier control-point offset from the reference
            # above; MAGIC45 is its projection onto the 45-degree axes
            MAGIC = 0.2652031
            SQRTHALF = np.sqrt(0.5)
            MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)

            # 8 cubic splines starting at (0, -1) and going counter-clockwise
            vertices = np.array(
                [[0.0, -1.0],

                 [MAGIC, -1.0],
                 [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                 [SQRTHALF, -SQRTHALF],

                 [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                 [1.0, -MAGIC],
                 [1.0, 0.0],

                 [1.0, MAGIC],
                 [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                 [SQRTHALF, SQRTHALF],

                 [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                 [MAGIC, 1.0],
                 [0.0, 1.0],

                 [-MAGIC, 1.0],
                 [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
                 [-SQRTHALF, SQRTHALF],

                 [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
                 [-1.0, MAGIC],
                 [-1.0, 0.0],

                 [-1.0, -MAGIC],
                 [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
                 [-SQRTHALF, -SQRTHALF],

                 [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
                 [-MAGIC, -1.0],
                 [0.0, -1.0],

                 [0.0, -1.0]],
                np.float_)

            # one MOVETO, 8 x 3 CURVE4 control/end points, one CLOSEPOLY
            codes = cls.CURVE4 * np.ones(26)
            codes[0] = cls.MOVETO
            codes[-1] = cls.CLOSEPOLY

            cls._unit_circle = Path(vertices, codes)
        return cls._unit_circle
    unit_circle = classmethod(unit_circle)
#@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
    """
    (staticmethod) Returns an arc on the unit circle from angle
    *theta1* to angle *theta2* (in degrees).

    If *n* is provided, it is the number of spline segments to make.
    If *n* is not provided, the number of spline segments is
    determined based on the delta between *theta1* and *theta2*.

    If *is_wedge* is True, the path additionally includes straight
    lines to and from the origin, producing a closed pie wedge.

    Masionobe, L.  2003.  `Drawing an elliptical arc using
    polylines, quadratic or cubic Bezier curves
    <http://www.spaceroots.org/documents/ellipse/index.html>`_.
    """
    # degrees to radians
    theta1 *= np.pi / 180.0
    theta2 *= np.pi / 180.0

    twopi = np.pi * 2.0
    halfpi = np.pi * 0.5

    # Normalize the angular range so eta1 <= eta2 while preserving the
    # requested sweep (a full-circle sweep must not collapse to zero).
    eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
    eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
    eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
    if (theta2 - theta1 > np.pi) and (eta2 - eta1 < np.pi):
        eta2 += twopi

    # number of curve segments to make (one per quarter turn, rounded
    # up to a power of two)
    if n is None:
        n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
    if n < 1:
        raise ValueError("n must be >= 1 or None")

    deta = (eta2 - eta1) / n
    t = np.tan(0.5 * deta)
    # Bezier control-point distance for one circular-arc segment
    # (formula from the Masionobe reference above).
    alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0

    steps = np.linspace(eta1, eta2, n + 1, True)
    cos_eta = np.cos(steps)
    sin_eta = np.sin(steps)

    # Per-segment start points (A) and end points (B); the tangent of a
    # unit-circle point (x, y) is (-y, x).
    xA = cos_eta[:-1]
    yA = sin_eta[:-1]
    xA_dot = -yA
    yA_dot = xA

    xB = cos_eta[1:]
    yB = sin_eta[1:]
    xB_dot = -yB
    yB_dot = xB

    # NOTE: np.float_ was an alias removed in NumPy 2.0; np.float64 is
    # the identical dtype on every NumPy version.
    if is_wedge:
        # MOVETO to origin, LINETO to arc start, 3 vertices per cubic
        # segment, then LINETO back towards origin and CLOSEPOLY.  The
        # zero-initialized first and last rows are the origin itself.
        length = n * 3 + 4
        vertices = np.zeros((length, 2), np.float64)
        codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
        vertices[1] = [xA[0], yA[0]]
        codes[0:2] = [Path.MOVETO, Path.LINETO]
        codes[-2:] = [Path.LINETO, Path.CLOSEPOLY]
        vertex_offset = 2
        end = length - 2
    else:
        # MOVETO to arc start then 3 vertices per cubic segment.
        length = n * 3 + 1
        vertices = np.zeros((length, 2), np.float64)
        codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
        vertices[0] = [xA[0], yA[0]]
        codes[0] = Path.MOVETO
        vertex_offset = 1
        end = length

    # For each segment: first control point, second control point, and
    # the on-curve end point, interleaved with stride 3.
    vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot
    vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot
    vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
    vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
    vertices[vertex_offset+2:end:3, 0] = xB
    vertices[vertex_offset+2:end:3, 1] = yB

    return Path(vertices, codes)
arc = classmethod(arc)
#@classmethod
def wedge(cls, theta1, theta2, n=None):
    """
    (staticmethod) Returns a wedge of the unit circle from angle
    *theta1* to angle *theta2* (in degrees).

    If *n* is provided, it is the number of spline segments to make.
    If *n* is not provided, the number of spline segments is
    determined based on the delta between *theta1* and *theta2*.

    This is simply :meth:`arc` with the wedge flag enabled, which
    closes the arc through the origin.
    """
    return cls.arc(theta1, theta2, n, is_wedge=True)
wedge = classmethod(wedge)
# Keep a handle to the C-level helper before shadowing its name below.
_get_path_collection_extents = get_path_collection_extents
def get_path_collection_extents(*args):
    """
    Given a sequence of :class:`Path` objects, returns the bounding
    box that encapsulates all of them.

    Raises ValueError if the path sequence (second positional
    argument) is empty.
    """
    from transforms import Bbox
    paths = args[1]
    if len(paths) == 0:
        raise ValueError("No paths provided")
    extents = _get_path_collection_extents(*args)
    return Bbox.from_extents(*extents)
| agpl-3.0 |
kastnerkyle/COCORA2012 | gui.py | 1 | 17016 | #!/usr/bin/python
import sys
from PyQt4 import QtGui as qtg
from PyQt4 import QtCore as qtc
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.ticker import FuncFormatter
import ExampleAlg
import numpy as np
import collections
import types
FPATH = "_05.wav"
class MplCanvas(FigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).

    Holds the matplotlib figure/axes plus all plotting state shared
    between redraws: zoom window, zoom-box geometry, cursor position,
    channel display toggles and the algorithm instance producing data.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        # White-background figure with a single subplot.
        self.fig = Figure(figsize=(width, height), dpi=dpi, facecolor='w')
        self.axes = self.fig.add_subplot(1,1,1)
        #Equivalent to hold(off) in MATLAB, i.e. each plot is fresh
        #without showing old data
        self.axes.hold(False)
        #Plot color order. For more information
        #see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.plot
        self.colors = ['b', 'r', 'g', 'c', 'm']
        #Zoom box color information
        self.zoom_color = 'y'
        #State variables must be here in order to retain state between
        #plot calls
        self.alg = ExampleAlg.ExampleAlg(FPATH)
        # Current zoom window: [lo, hi] per axis, in data coordinates.
        self.zoom = {"x":[],
                     "y":[]}
        #State flag to see if zooming mode is active. Set in the left_pressed
        #when the event for left_held is connected, then released when
        #left_released is called
        self.zooming = None
        #Zoom_box holds the x and y values for current zoom box when
        #self.zooming == True
        self.zoom_box = {"x":{},
                         "y":{}}
        self.zoom_box["x"] = {"data_coords":[],
                              "axes_coords":[]}
        self.zoom_box["y"] = {"data_coords":[],
                              "axes_coords":[]}
        #State storage for the current cursor position in data coordinates
        self.cursor_data = {}
        self.cursor_data["x"] = 0
        self.cursor_data["y"] = 0
        #Setting to hold number of channels coming from algorithm
        self.num_chans = 0
        #Array which wil hold T/F values for which channels to display
        self.display_chans = []
        #Maximum zoom is 0, x_max and 0, y_max for the x and y axes
        self.x_max = 0
        self.y_max = 0
        # Finish FigureCanvas construction and attach to the Qt parent.
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
class DynamicMplCanvas(MplCanvas):
    """ A canvas that updates itself every X seconds with a new plot. """
    def __init__(self, *args, **kwargs):
        #Initialize parent
        MplCanvas.__init__(self, *args, **kwargs)
        #Set initial plot and initial states
        self.compute_initial_figure()
        #Create dynamic canvas and start plotting, set timer for graph updates
        timer = qtc.QTimer(self)
        qtc.QObject.connect(timer,qtc.SIGNAL("timeout()"),self.update_figure)
        X = 750 #in milliseconds
        timer.start(X)

    def draw_figure(self, data):
        """ Handles all the drawing code that is shared by the initial plotting
        and the dynamic plotting.

        data is a sequence of per-channel arrays; only channels whose
        toggle in self.display_chans is True are drawn.
        """
        #Link channels in order with the colors list presented by self.colors.
        #Note that if data is shorter than colors list, the end channels will
        #"disappear"
        #TODO: Add skip list to silence channels during runtime
        display = self.display_chans
        colors = self.colors
        # Interleave (channel, color-format) pairs for one plot() call.
        args = []
        for tg, ch, col in zip(display, data, colors):
            if tg == True:
                args.append(ch)
                args.append(col)
        self.axes.plot(*args)
        #xs and ys hold the state values for what we want the zoom to be
        self.axes.set_xlim(self.zoom["x"][0], self.zoom["x"][1])
        self.axes.set_ylim(self.zoom["y"][0], self.zoom["y"][1])
        #Display X axes in units of frequency, but we want to leave all the state storage and algorithmic stuff in bin units
        #self.axes.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: x*float(self.alg.framerate)/self.alg.fftlen))
        #Draw lines for zooming rectangle, with one axis being in data coords
        #and the other being in axes coords - see
        #http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.axhspan
        if self.zooming != None:
            try:
                self.axes.axhspan(self.zoom_box["y"]["data_coords"][0],
                                  self.zoom_box["y"]["data_coords"][1],
                                  self.zoom_box["x"]["axes_coords"][0],
                                  self.zoom_box["x"]["axes_coords"][1],
                                  color=self.zoom_color,
                                  alpha=.5)
                self.axes.axvspan(self.zoom_box["x"]["data_coords"][0],
                                  self.zoom_box["x"]["data_coords"][1],
                                  self.zoom_box["y"]["axes_coords"][0],
                                  self.zoom_box["y"]["axes_coords"][1],
                                  color=self.zoom_color,
                                  alpha=.5)
            except IndexError:
                #Ignore indexing exceptions - sometimes zoom_box has not been
                #filled when plot is called
                pass
        #Create text in the bottom left that show the data coordinates which the
        #mouse is currently hovering over
        x = "%s" % float("%.2f" % self.cursor_data["x"])
        y = "%s" % float("%.2f" % self.cursor_data["y"])
        self.axes.text(-.1, -.1, "x="+x+" y="+y, transform = self.axes.transAxes)
        self.draw()

    def compute_initial_figure(self):
        """Initialize figure and set maximum X and maximum Y"""
        #Get first result from algorithm
        self.alg.start()
        res = self.alg.run()
        #Get number of chans in order to set up toggle boxes
        self.num_chans = len(res)
        # All channels start hidden until the user toggles them on.
        self.display_chans = [False for i in range(self.num_chans)]
        #Find maximum value of all channels, excluding DC term ([1:])
        max_max = max(map(lambda x: max(x[1:]), res))
        #Find length of longest channel
        self.x_max = max(map(len, res))
        #1.05 is a cushion value so that we can see all of the data at
        #farthest zoom out
        self.y_max = 1.05*max_max
        #Set zoom state to maximum zoom out
        self.zoom["x"] = [0, self.x_max]
        self.zoom["y"] = [0, self.y_max]
        self.axes.set_xlim(self.zoom["x"][0], self.zoom["x"][1])
        self.axes.set_ylim(self.zoom["y"][0], self.zoom["y"][1])
        self.draw_figure(res)

    def update_figure(self):
        """ Plot the new data, and set zoom levels to current state values. """
        #Get values for next algorithm process
        res = self.alg.run()
        #Plot new data using configured color scheme
        self.draw_figure(res)
class AlgGui(qtg.QWidget):
    """ Main GUI class, defines mouse and keyboard control functionality. """
    #To see a tutorial on using the transforms...
    #http://matplotlib.sourceforge.net/users/transforms_tutorial.html
    def __init__(self):
        qtg.QWidget.__init__(self)
        self.graph = DynamicMplCanvas(self, width=10, height=10, dpi=100)
        #Storage for click coordinates during click state
        self.coords = {"x":[],
                       "y":[]}
        self.initUI()

    def genEditFunction(self, key, le, mn, mx):
        """ Generator function for making a specific textChanged function
        in order to connect to a QLineEdit box. Only works for integer
        inputs to QLineEdit box. """
        def textChanged(string):
            #Check that le is between mn and mx
            pos = 0
            v = qtg.QIntValidator(mn, mx, le)
            le.setValidator(v)
            #Bounds checking
            #NOTE(review): under PyQt4 API v2, validate() returns a
            #(state, pos) tuple rather than a bare state; confirm this
            #comparison behaves as intended for the API version in use.
            if v.validate(string, pos) == qtg.QValidator.Invalid:
                value = self.graph.alg.adjustable_params[key]["current_value"]
                le.setText(str(value))
                print("Input of " + str(string) + " is outside range " + str(mn) + "," + str(mx))
            else:
                try:
                    self.graph.alg.adjustable_params[key]["current_value"] = int(string)
                except ValueError:
                    #Do this to suppress printing of error when line is blank
                    pass
        return textChanged

    def genIdleFunction(self, key, le):
        """ Generator for a super simple test of box contents.

        Resets the line edit to the parameter's minimum value when it is
        left blank after editing finishes. """
        def editingFinished():
            if len(le.text()) < 1:
                #BUGFIX: original code looked up a non-existent
                #"min_value" key and never bound the result, so `value`
                #was undefined (NameError). Use the "min" key, matching
                #addSliders, and assign it before use.
                value = self.graph.alg.adjustable_params[key]["min"]
                le.setText(str(value))
        return editingFinished

    def genSliderFunction(self, key, le, mn, mx):
        """ Generator function for making the value changed function for a particular slider """
        def valueChanged(value):
            #Map the slider's 0-100 range onto [mn, mx], clamping at mn.
            res = value*mx/100 if value*mx/100 > mn else mn
            le.setText(str(res))
            self.graph.alg.adjustable_params[key]["current_value"] = res
        return valueChanged

    def addSliders(self, widgets):
        """ Function to add arbitrary number of sliders to the display """
        for key in self.graph.alg.adjustable_params.keys():
            #Add a label to the widgets dict
            widgets[str(key) + "_label"] = qtg.QLabel(str(key))
            #Get data extents for bounds checking
            mn = self.graph.alg.adjustable_params[key]["min"]
            mx = self.graph.alg.adjustable_params[key]["max"]
            #Create a line edit widget and connect it to the generated
            #textChanged function from the genEditFunction
            le = qtg.QLineEdit(self)
            edit = self.genEditFunction(key, le, mn, mx)
            le.textChanged.connect(edit)
            #Set text to min value if editing finishes as blank...
            #Currently bugged in Ubuntu 11.10
            fin = self.genIdleFunction(key, le)
            le.editingFinished.connect(fin)
            #Set text to default value
            value = self.graph.alg.adjustable_params[key]["current_value"]
            le.setText(str(value))
            widgets[str(key) + "_current_value"] = le
            #Create a slider, connect it to the generated sliderFunction,
            #and add it to the widgets dict
            sld = qtg.QSlider(qtc.Qt.Horizontal, self)
            fn = self.genSliderFunction(key, le, mn, mx)
            sld.valueChanged.connect(fn)
            widgets[str(key) + "_slider"] = sld
            #Add an empty space, so that widgets are better grouped visually
            widgets[str(key) + "_spacer"] = qtg.QLabel(" ")

    def boundsCheck(self, xdata, ydata):
        """Make sure that zoom boundaries are within data window.

        Returns the (xdata, ydata) pair clamped to the current zoom
        limits. """
        xdata = self.graph.zoom["x"][0] if xdata < self.graph.zoom["x"][0] else xdata
        xdata = self.graph.zoom["x"][1] if xdata > self.graph.zoom["x"][1] else xdata
        ydata = self.graph.zoom["y"][0] if ydata < self.graph.zoom["y"][0] else ydata
        ydata = self.graph.zoom["y"][1] if ydata > self.graph.zoom["y"][1] else ydata
        return (xdata, ydata)

    def left_pressed(self, event):
        """Record location where the left click started"""
        #Use the transform so we enable the ability to click outside axes,
        #as event.xdata = None if event.inaxes == False
        #Also make sure not to zoom outside data bounds
        if event.button == 1:
            xdata, ydata = self.graph.axes.transData.inverted().transform((event.x, event.y))
            xdata, ydata = self.boundsCheck(xdata, ydata)
            #Add location data to self.coords for storage
            self.coords["x"].append(xdata)
            self.coords["y"].append(ydata)
            #Set the zooming state so it is no longer None
            self.graph.zooming = self.graph.mpl_connect("motion_notify_event", self.left_held)

    def left_held(self, event):
        """Method for use during zoom event"""
        #Get x and y coordinates from data coords where left click started
        x_temp, y_temp = self.graph.axes.transData.transform((self.coords["x"][0], self.coords["y"][0]))
        #Get x and y data points for where the current event is
        x0, y0 = self.graph.axes.transData.inverted().transform((event.x, event.y))
        #Save off data coords
        self.graph.zoom_box["x"]["data_coords"] = sorted([self.coords["x"][0], x0])
        self.graph.zoom_box["y"]["data_coords"] = sorted([self.coords["y"][0], y0])
        #Get axes coordinates for where left click started
        x1, y1 = self.graph.axes.transAxes.inverted().transform((x_temp, y_temp))
        #Get current coordinates of cursor
        x2, y2 = self.graph.axes.transAxes.inverted().transform((event.x, event.y))
        #Make sure the box is always left, right and lower, higher
        self.graph.zoom_box["x"]["axes_coords"] = sorted([x1, x2])
        self.graph.zoom_box["y"]["axes_coords"] = sorted([y1, y2])

    def left_released(self, event):
        """Record location of click release, then update axes state"""
        if event.button == 1:
            #Get data coordinate for event. Use this method because event.x and
            #event.y return None when event.inaxes == None
            xdata, ydata = self.graph.axes.transData.inverted().transform((event.x, event.y))
            xdata, ydata = self.boundsCheck(xdata, ydata)
            #Append release coordinates to the stored value for where left click
            #started.
            self.coords["x"].append(xdata)
            self.coords["y"].append(ydata)
            x_list = self.coords["x"]
            y_list = self.coords["y"]
            #xs and ys hold the zoom state of the plot, so update those
            #TODO: Check that zoom box covers some portion inside the graph
            self.graph.zoom["x"] = sorted(x_list)
            self.graph.zoom["y"] = sorted(y_list)
            #Disconnect event and return zooming flag to None state
            self.graph.mpl_disconnect(self.graph.zooming)
            self.graph.zooming = None
            #Empty out coords, left click is no longer pressed
            self.coords["x"] = []
            self.coords["y"] = []

    def right_pressed(self, event):
        """Zoom out to initial zoom level"""
        if event.button == 3:
            #Zoom to initial state
            self.graph.zoom["x"] = [0, self.graph.x_max]
            self.graph.zoom["y"] = [0, self.graph.y_max]

    def display_cursor_point(self, event):
        """Show the data coordinate where the mouse cursor is hovering"""
        if event.inaxes is not None:
            self.graph.cursor_data["x"] = event.xdata
            self.graph.cursor_data["y"] = event.ydata

    def genCheckboxFunction(self, num):
        """Generator for a channel toggle checkboxes. """
        def toggleChannel():
            self.graph.display_chans[num] = not self.graph.display_chans[num]
        return toggleChannel

    def addCheckboxes(self, widgets):
        """Add textboxes to passed in collection."""
        for i in range(self.graph.num_chans):
            cb = qtg.QCheckBox()
            widgets['chan_'+str(i)+'checkbox'] = cb
            fn = self.genCheckboxFunction(i)
            cb.stateChanged.connect(fn)

    def initLayout(self):
        """Build the widget layout and wire up all mouse event handlers."""
        hbox = qtg.QHBoxLayout()
        #Click and drag zooming functions
        self.zoom_start = self.graph.mpl_connect("button_press_event", self.left_pressed)
        self.zoom_end = self.graph.mpl_connect("button_release_event", self.left_released)
        #Undo zoom functions
        self.unzoom = self.graph.mpl_connect("button_press_event", self.right_pressed)
        #Cursor positional display
        self.cursor_pos = self.graph.mpl_connect("motion_notify_event", self.display_cursor_point)
        #Plot graphic
        hbox.addWidget(self.graph)
        vbox = qtg.QVBoxLayout()
        hbox.addStretch(1)
        hbox.addLayout(vbox)
        #Top right widgets, pass in widgets dict so sliders can be added
        widgets = collections.OrderedDict()
        self.addSliders(widgets)
        [vbox.addWidget(x) for x in widgets.values()]
        vbox.addStretch(1)
        #Bottom right widgets, pass in checbox_widgets so checkboxes can be added
        vbox.addWidget(qtg.QLabel("Enable Channels 1 - "+str(self.graph.num_chans)))
        hbox_check = qtg.QHBoxLayout()
        checkbox_widgets = collections.OrderedDict()
        self.addCheckboxes(checkbox_widgets)
        [hbox_check.addWidget(x) for x in checkbox_widgets.values()]
        vbox.addLayout(hbox_check)
        self.setLayout(hbox)

    def initUI(self):
        #Set window title to the name of the included algorithm
        self.setWindowTitle(self.graph.alg.__class__.__name__)
        self.initLayout()
        self.show()
if __name__ == "__main__":
    # Launch the Qt event loop with a single AlgGui window; exec_()
    # blocks until the window closes and its status code is forwarded
    # to the shell.
    app = qtg.QApplication(sys.argv)
    g = AlgGui()
    sys.exit(app.exec_())
| bsd-3-clause |
jzt5132/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)

# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause

import logging
from time import time

from numpy.random import RandomState
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# Gallery layout: n_row x n_col component images per figure.
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)

###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data

n_samples, n_features = faces.shape

# global centering
faces_centered = faces - faces.mean(axis=0)

# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)

print("Dataset consists of %d faces" % n_samples)
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
# Each entry: (display name, estimator instance, center-the-data flag).
estimators = [
    ('Eigenfaces - RandomizedPCA',
     decomposition.RandomizedPCA(n_components=n_components, whiten=True),
     True),

    ('Non-negative components - NMF',
     decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
                       tol=5e-3, sparseness='components'),
     False),

    ('Independent components - FastICA',
     decomposition.FastICA(n_components=n_components, whiten=True),
     True),

    ('Sparse comp. - MiniBatchSparsePCA',
     decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
                                      n_iter=100, batch_size=3,
                                      random_state=rng),
     True),

    ('MiniBatchDictionaryLearning',
     decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
                                               n_iter=50, batch_size=3,
                                               random_state=rng),
     True),

    ('Cluster centers - MiniBatchKMeans',
     MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
                     max_iter=50, random_state=rng),
     True),

    ('Factor Analysis components - FA',
     decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
     True),
]


###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])

###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
    print("Extracting the top %d %s..." % (n_components, name))
    t0 = time()
    data = faces
    if center:
        data = faces_centered
    estimator.fit(data)
    train_time = (time() - t0)
    print("done in %0.3fs" % train_time)
    # Clustering estimators expose centers; decompositions expose components.
    if hasattr(estimator, 'cluster_centers_'):
        components_ = estimator.cluster_centers_
    else:
        components_ = estimator.components_
    # Factor Analysis additionally provides a per-pixel noise variance map.
    if hasattr(estimator, 'noise_variance_'):
        plot_gallery("Pixelwise variance",
                     estimator.noise_variance_.reshape(1, -1), n_col=1,
                     n_row=1)
    plot_gallery('%s - Train time %.1fs' % (name, train_time),
                 components_[:n_components])

plt.show()
pratapvardhan/pandas | pandas/tests/scalar/timestamp/test_comparisons.py | 7 | 6112 | # -*- coding: utf-8 -*-
from datetime import datetime
import operator
import pytest
import numpy as np
from dateutil.tz import tzutc
from pytz import utc
from pandas.compat import long, PY2
from pandas import Timestamp
class TestTimestampComparison(object):
    """Comparison semantics of ``Timestamp`` against scalars, arrays,
    datetimes, and between tz-naive and tz-aware instances."""

    def test_comparison_object_array(self):
        # GH#15183
        ts = Timestamp('2011-01-03 00:00:00-0500', tz='US/Eastern')
        other = Timestamp('2011-01-01 00:00:00-0500', tz='US/Eastern')
        naive = Timestamp('2011-01-01 00:00:00')

        arr = np.array([other, ts], dtype=object)
        res = arr == ts
        expected = np.array([False, True], dtype=bool)
        assert (res == expected).all()

        # 2D case
        arr = np.array([[other, ts],
                        [ts, other]],
                       dtype=object)
        res = arr != ts
        expected = np.array([[True, False], [False, True]], dtype=bool)
        assert res.shape == expected.shape
        assert (res == expected).all()

        # tzaware mismatch
        arr = np.array([naive], dtype=object)
        with pytest.raises(TypeError):
            arr < ts

    def test_comparison(self):
        # 5-18-2012 00:00:00.000
        stamp = long(1337299200000000000)

        val = Timestamp(stamp)

        assert val == val
        assert not val != val
        assert not val < val
        assert val <= val
        assert not val > val
        assert val >= val

        other = datetime(2012, 5, 18)
        assert val == other
        assert not val != other
        assert not val < other
        assert val <= other
        assert not val > other
        assert val >= other

        other = Timestamp(stamp + 100)

        # FIX: the original duplicated ``assert val != other``; replace
        # the duplicate with the matching equality check so both
        # operators are exercised, mirroring the blocks above.
        assert not val == other
        assert val != other
        assert val < other
        assert val <= other
        assert other > val
        assert other >= val

    def test_compare_invalid(self):
        # GH 8058
        val = Timestamp('20130101 12:01:02')
        assert not val == 'foo'
        assert not val == 10.0
        assert not val == 1
        assert not val == long(1)
        assert not val == []
        assert not val == {'foo': 1}
        assert not val == np.float64(1)
        assert not val == np.int64(1)

        assert val != 'foo'
        assert val != 10.0
        assert val != 1
        assert val != long(1)
        assert val != []
        assert val != {'foo': 1}
        assert val != np.float64(1)
        assert val != np.int64(1)

    def test_cant_compare_tz_naive_w_aware(self):
        # see gh-1404
        a = Timestamp('3/12/2012')
        b = Timestamp('3/12/2012', tz='utc')

        pytest.raises(Exception, a.__eq__, b)
        pytest.raises(Exception, a.__ne__, b)
        pytest.raises(Exception, a.__lt__, b)
        pytest.raises(Exception, a.__gt__, b)
        pytest.raises(Exception, b.__eq__, a)
        pytest.raises(Exception, b.__ne__, a)
        pytest.raises(Exception, b.__lt__, a)
        pytest.raises(Exception, b.__gt__, a)

        # On Python 2 the comparison raises; on Python 3 it is simply
        # unequal when compared against a stdlib datetime.
        if PY2:
            pytest.raises(Exception, a.__eq__, b.to_pydatetime())
            pytest.raises(Exception, a.to_pydatetime().__eq__, b)
        else:
            assert not a == b.to_pydatetime()
            assert not a.to_pydatetime() == b

    def test_cant_compare_tz_naive_w_aware_explicit_pytz(self):
        # see gh-1404
        a = Timestamp('3/12/2012')
        b = Timestamp('3/12/2012', tz=utc)

        pytest.raises(Exception, a.__eq__, b)
        pytest.raises(Exception, a.__ne__, b)
        pytest.raises(Exception, a.__lt__, b)
        pytest.raises(Exception, a.__gt__, b)
        pytest.raises(Exception, b.__eq__, a)
        pytest.raises(Exception, b.__ne__, a)
        pytest.raises(Exception, b.__lt__, a)
        pytest.raises(Exception, b.__gt__, a)

        if PY2:
            pytest.raises(Exception, a.__eq__, b.to_pydatetime())
            pytest.raises(Exception, a.to_pydatetime().__eq__, b)
        else:
            assert not a == b.to_pydatetime()
            assert not a.to_pydatetime() == b

    def test_cant_compare_tz_naive_w_aware_dateutil(self):
        # see gh-1404
        a = Timestamp('3/12/2012')
        b = Timestamp('3/12/2012', tz=tzutc())

        pytest.raises(Exception, a.__eq__, b)
        pytest.raises(Exception, a.__ne__, b)
        pytest.raises(Exception, a.__lt__, b)
        pytest.raises(Exception, a.__gt__, b)
        pytest.raises(Exception, b.__eq__, a)
        pytest.raises(Exception, b.__ne__, a)
        pytest.raises(Exception, b.__lt__, a)
        pytest.raises(Exception, b.__gt__, a)

        if PY2:
            pytest.raises(Exception, a.__eq__, b.to_pydatetime())
            pytest.raises(Exception, a.to_pydatetime().__eq__, b)
        else:
            assert not a == b.to_pydatetime()
            assert not a.to_pydatetime() == b

    def test_timestamp_compare_scalars(self):
        # case where ndim == 0
        lhs = np.datetime64(datetime(2013, 12, 6))
        rhs = Timestamp('now')
        nat = Timestamp('nat')

        # each comparison op and its reflected counterpart
        ops = {'gt': 'lt',
               'lt': 'gt',
               'ge': 'le',
               'le': 'ge',
               'eq': 'eq',
               'ne': 'ne'}

        for left, right in ops.items():
            left_f = getattr(operator, left)
            right_f = getattr(operator, right)
            expected = left_f(lhs, rhs)

            result = right_f(rhs, lhs)
            assert result == expected

            expected = left_f(rhs, nat)
            result = right_f(nat, rhs)
            assert result == expected

    def test_timestamp_compare_with_early_datetime(self):
        # e.g. datetime.min
        stamp = Timestamp('2012-01-01')

        assert not stamp == datetime.min
        assert not stamp == datetime(1600, 1, 1)
        assert not stamp == datetime(2700, 1, 1)
        assert stamp != datetime.min
        assert stamp != datetime(1600, 1, 1)
        assert stamp != datetime(2700, 1, 1)
        assert stamp > datetime(1600, 1, 1)
        assert stamp >= datetime(1600, 1, 1)
        assert stamp < datetime(2700, 1, 1)
        assert stamp <= datetime(2700, 1, 1)
| bsd-3-clause |
Silmathoron/NNGT | nngt/__init__.py | 1 | 10076 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
NNGT
====
Package aimed at facilitating the analysis of Neural Networks and Graphs'
Topologies in Python by providing a unified interface for network generation
and analysis.
The library mainly provides algorithms for
1. generating networks
2. studying their topological properties
3. doing some basic spatial, topological, and statistical visualizations
4. interacting with neuronal simulators and analyzing neuronal activity
Available modules
-----------------
analysis
Tools to study graph topology and neuronal activity.
core
Where the main classes are coded; however, most useful classes and methods
for users are loaded at the main level (`nngt`) when the library is imported,
so `nngt.core` should generally not be used.
generation
Functions to generate specific networks.
geometry
Tools to work on metric graphs (see
`PyNCulture <https://github.com/SENeC-Initiative/PyNCulture>`_).
io
Tools for input/output operations.
lib
Basic functions used by several most other modules.
simulation
Tools to provide complex network generation with NEST and help analyze the
influence of the network structure on neuronal activity.
plot
Plot data or graphs using matplotlib.
Units
-----
Functions related to spatial embedding of networks are using micrometers
(um) as default unit; other units from the metric system can also be
provided:
- `mm` for milimeters
- `cm` centimeters
- `dm` for decimeters
- `m` for meters
Main classes and functions
==========================
"""
import os as _os
import errno as _errno
import importlib.util as _imputil
import shutil as _shutil
import sys as _sys
import logging as _logging
import numpy as _np
__version__ = '2.2.1'
# ----------------------- #
# Requirements and config #
# ----------------------- #

# IMPORTANT: configuration MUST come first

# Default runtime configuration; values are overridden by the user's
# ~/.nngt/nngt.conf and by `set_config` at runtime.
_config = {
    'color_lib': 'matplotlib',
    'db_folder': "~/.nngt/database",
    'db_name': "main",
    'db_to_file': False,
    'db_url': None,
    'graph': object,
    'backend': "nngt",
    'library': None,
    'log_folder': "~/.nngt/log",
    'log_level': 10,
    'log_to_file': False,
    'mpi': False,
    'mpi_comm': None,
    'mpl_backend': None,
    'msd': None,
    'multithreading': True,
    'omp': 1,
    'palette_continuous': 'magma',
    'palette_discrete': 'Set1',
    'use_database': False,
    'use_tex': False,
    'seeds': None,
    'with_nest': False,
    'with_plot': False,
}

# tools for nest interactions (can be used in config)
_old_nest_func = {}

# random generator for numpy
_rng = _np.random.default_rng()

# state of master seed (already seeded or not)
_seeded = False

# state of local seeds for multithreading or MPI (already used or not)
_seeded_local = False
_used_local = False

# database (predeclare here, can be used in config)
_db = None
_main_db = None

# configuration folders and files
_lib_folder = _os.path.expanduser('~') + '/.nngt'
_new_config = _os.path.expanduser('~') + '/.nngt/nngt.conf'
_default_config = _os.path.dirname(_os.path.realpath(__file__)) + \
                  '/nngt.conf.default'

# check that library config folder exists
if not _os.path.isdir(_lib_folder):
    try:
        _os.mkdir(_lib_folder)
    except OSError as e:
        # EEXIST may occur if another process created the folder in the
        # meantime; any other error is a real failure.
        if e.errno != _errno.EEXIST:
            raise
# IMPORTANT: first create logger
from .lib.logger import _init_logger, _log_message

_logger = _logging.getLogger(__name__)
_init_logger(_logger)

# IMPORTANT: afterwards, import config
from .lib.nngt_config import (get_config, set_config, _load_config, _convert,
                              _log_conf_changed, _lazy_load)

# check that config file exists
if not _os.path.isfile(_new_config):  # if it does not, create it
    _shutil.copy(_default_config, _new_config)
else:  # if it does check it is up-to-date
    with open(_new_config, 'r+') as fconfig:
        # keep only non-empty, non-comment lines
        _options = [l.strip() for l in fconfig if l.strip() and l[0] != "#"]

        config_version = ""

        for _opt in _options:
            sep = _opt.find("=")
            _opt_name = _opt[:sep].strip()
            _opt_val = _convert(_opt[sep+1:].strip())

            if _opt_name == "version":
                config_version = _opt_val

        # overwrite an outdated config file with the packaged default,
        # substituting the current version number
        if config_version != __version__:
            fconfig.seek(0)

            data = []

            with open(_default_config) as fdefault:
                data = [l for l in fdefault]

            i = 0

            for line in data:
                if '{version}' in line:
                    fconfig.write(line.format(version=__version__))
                    i += 1
                    break
                else:
                    fconfig.write(line)
                    i += 1

            for line in data[i:]:
                fconfig.write(line)

            fconfig.truncate()

            # FIX: corrected grammar in the user-facing warning
            # ("have be" -> "have been").
            _log_message(_logger, "WARNING",
                         "Updating the configuration file, your previous "
                         "settings have been overwritten.")

_load_config(_new_config)

# multithreading
_config["omp"] = int(_os.environ.get("OMP", 1))

if _config["omp"] > 1:
    _config["multithreading"] = True
# --------------------- #
# Loading graph library #
#---------------------- #

from .lib.graph_backends import use_backend, analyze_graph

_libs = ['graph-tool', 'igraph', 'networkx']
_glib = _config['backend']

assert _glib in _libs or _glib == 'nngt', \
    "Internal error for graph library loading, please report " +\
    "this on GitHub."

try:
    use_backend(_config['backend'], False, silent=True)
except ImportError:
    # configured backend unavailable: drop it and fall back through the
    # remaining libraries (tried from the end of the list)
    idx = _libs.index(_config['backend'])
    del _libs[idx]

    keep_trying = True

    while _libs and keep_trying:
        try:
            use_backend(_libs[-1], False, silent=True)
            keep_trying = False
        except ImportError:
            _libs.pop()

# no third-party backend could be loaded: use the built-in one
if not _libs:
    use_backend('nngt', False, silent=True)
    _log_message(_logger, "WARNING",
                 "This module needs one of the following graph libraries to "
                 "study networks: `graph_tool`, `igraph`, or `networkx`.")
# ------- #
# Modules #
# ------- #
# import some tools into main namespace
from .io.graph_loading import load_from_file
from .io.graph_saving import save_to_file
from .lib.rng_tools import seed
from .lib.test_functions import on_master_process, num_mpi_processes
from .core.group_structure import Group, MetaGroup, Structure
from .core.neural_pop_group import (GroupProperty, MetaNeuralGroup,
NeuralGroup, NeuralPop)
from .core.graph import Graph
from .core.spatial_graph import SpatialGraph
from .core.networks import Network, SpatialNetwork
from .generation.graph_connectivity import generate
# import modules
from . import analysis
from . import core
from . import generation
from . import geometry
from . import io
from . import lib
# Explicit public API of the package.
__all__ = [
    "analysis",
    "analyze_graph",
    "core",
    "generate",
    "generation",
    "geometry",
    "get_config",
    "Graph",
    "GroupProperty",
    "lib",
    "load_from_file",
    "Network",
    "NeuralGroup",
    "NeuralPop",
    "num_mpi_processes",
    "on_master_process",
    "save_to_file",
    "seed",
    "set_config",
    "SpatialGraph",
    "SpatialNetwork",
    "use_backend",
    "__version__"
]
# test if plot module is supported (matplotlib may be missing)
try:
    from . import plot
    _config['with_plot'] = True
    __all__.append('plot')
except ImportError as e:
    # plotting is optional: record the failure and keep loading
    _log_message(_logger, "DEBUG",
                 "An error occurred, plot module will not be loaded: " + str(e))
    _config['with_plot'] = False
# NEST simulator support: lazy-load only if the `nest` module is installed
if _imputil.find_spec("nest") is not None:
    _config['with_nest'] = True
    simulation = _lazy_load("nngt.simulation")
    __all__.append("simulation")
# load database module if required
if _config["use_database"]:
    try:
        from . import database
        __all__.append('database')
    except ImportError as e:
        _log_message(_logger, "DEBUG",
                     "Could not load database module: " + str(e))
# ------------------------ #
# Print config information #
# ------------------------ #
_glib_version = (_config["library"].__version__[:5]
                 if _config["library"] is not None else __version__)
# Probe optional geometry/IO dependencies. Catch ImportError specifically:
# the previous bare `except:` clauses would also have swallowed
# SystemExit and KeyboardInterrupt.
try:
    import svg.path as _svg
    _has_svg = True
except ImportError:
    _has_svg = False
try:
    import dxfgrabber as _dxf
    _has_dxf = True
except ImportError:
    _has_dxf = False
try:
    import shapely as _shapely
    # truthy version string when shapely is available
    _has_shapely = _shapely.__version__
except (ImportError, AttributeError):
    _has_shapely = False
# Summary banner reported through the logger.
_log_info = '''
# ----------- #
# NNGT loaded #
# ----------- #
Graph library: {gl}
Multithreading: {thread} ({omp} thread{s})
MPI: {mpi}
Plotting: {plot}
NEST support: {nest}
Shapely: {shapely}
SVG support: {svg}
DXF support: {dxf}
Database: {db}
'''.format(
    gl = _config["backend"] + ' ' + _glib_version,
    thread = _config["multithreading"],
    plot = _config["with_plot"],
    nest = _config["with_nest"],
    db = _config["use_database"],
    omp = _config["omp"],
    s = "s" if _config["omp"] > 1 else "",
    mpi = _config["mpi"],
    shapely = _has_shapely,
    svg = _has_svg,
    dxf = _has_dxf,
)
_log_conf_changed(_log_info)
| gpl-3.0 |
intel-analytics/analytics-zoo | pyzoo/test/zoo/orca/learn/ray/tf/test_tf_ray_estimator.py | 1 | 21559 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import numpy as np
import pytest
import tensorflow as tf
from zoo import init_nncontext
from zoo.orca.data import XShards
import zoo.orca.data.pandas
from zoo.orca.learn.tf2 import Estimator
from zoo.ray import RayContext
import ray
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
import os
resource_path = os.path.join(
os.path.realpath(os.path.dirname(__file__)), "../../../../resources")
def linear_dataset(a=2, size=1000):
    """Generate a toy linear-regression dataset y = x / a.

    Parameters
    ----------
    a : float
        Divisor relating targets to inputs (y = x / a). The previous
        implementation hard-coded ``x / 2`` and silently ignored `a`;
        the default ``a=2`` preserves that behavior.
    size : int
        Number of samples to draw.

    Returns
    -------
    (x, y) : tuple of np.ndarray
        Both arrays have shape ``(size, 1)``; x is uniform in [0, 1).
    """
    x = np.random.rand(size)
    y = x / a  # was `x / 2`, which ignored the `a` parameter
    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    return x, y
def create_train_datasets(config, batch_size):
    """Build the shuffled, batched training pipeline over the toy linear data."""
    import tensorflow as tf
    features, targets = linear_dataset(size=NUM_TRAIN_SAMPLES)
    pipeline = tf.data.Dataset.from_tensor_slices((features, targets))
    return pipeline.shuffle(NUM_TRAIN_SAMPLES).batch(batch_size)
def create_test_dataset(config, batch_size):
    """Build the batched (unshuffled) evaluation pipeline over the toy linear data."""
    import tensorflow as tf
    features, targets = linear_dataset(size=NUM_TEST_SAMPLES)
    return tf.data.Dataset.from_tensor_slices((features, targets)).batch(batch_size)
def simple_model(config):
    """Two-layer dense regression model mapping one scalar to one scalar."""
    import tensorflow as tf
    layers = [
        tf.keras.layers.Dense(10, input_shape=(1,)),
        tf.keras.layers.Dense(1),
    ]
    return tf.keras.models.Sequential(layers)
def compile_args(config):
    """Return the keyword arguments for ``keras.Model.compile``.

    The learning rate is taken from ``config["lr"]`` when present and
    defaults to 1e-3 otherwise. Also tolerates ``config=None`` (the
    tests construct Estimators with ``config=None``; the previous
    ``"lr" in config`` check would raise TypeError in that case —
    TODO confirm whether the framework ever forwards None here).
    """
    import tensorflow as tf
    lr = 1e-3 if config is None else config.get("lr", 1e-3)
    args = {
        "optimizer": tf.keras.optimizers.SGD(lr),
        "loss": "mean_squared_error",
        "metrics": ["mean_squared_error"]
    }
    return args
def model_creator(config):
    """Create and compile the simple regression model."""
    compiled_model = simple_model(config)
    compiled_model.compile(**compile_args(config))
    return compiled_model
def identity_model_creator(config):
    # Model that simply echoes its scalar input; used by the predict()
    # round-trip tests below.
    # NOTE(review): `input_shape=(1)` is the int 1, not the tuple (1,);
    # Keras accepts both, but (1,) would be clearer.
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(1)),
        tf.keras.layers.Lambda(lambda x: tf.identity(x))
    ])
    model.compile()
    return model
def create_auto_shard_datasets(config, batch_size):
    """Dataset interleaving the auto-shard CSV files, yielding (x, x) pairs."""
    import tensorflow as tf
    pattern = os.path.join(resource_path, "orca/learn/test_auto_shard/*.csv")
    pipeline = tf.data.Dataset.list_files(pattern)
    pipeline = pipeline.interleave(lambda fname: tf.data.TextLineDataset(fname))
    pipeline = pipeline.map(lambda line: tf.strings.to_number(line))
    pipeline = pipeline.map(lambda value: (value, value))
    return pipeline.batch(batch_size)
def create_auto_shard_model(config):
    """Identity model (no trainable weights) used by the auto-shard tests."""
    import tensorflow as tf
    identity_layer = tf.keras.layers.Lambda(lambda t: tf.identity(t))
    return tf.keras.models.Sequential([identity_layer])
def create_auto_shard_compile_args(config):
    # Compile args for the sharding tests: the loss is 0 iff the two
    # records in each per-worker batch are identical (i.e. data was
    # sharded by file, not by record); lr=0 so weights never change.
    import tensorflow as tf
    def loss_func(y1, y2):
        return tf.abs(y1[0] - y1[1]) + tf.abs(y2[0] - y2[1])
    args = {
        "optimizer": tf.keras.optimizers.SGD(lr=0.0),
        "loss": loss_func,
    }
    return args
def auto_shard_model_creator(config):
    """Create and compile the identity model for the auto-shard tests."""
    shard_model = create_auto_shard_model(config)
    shard_model.compile(**create_auto_shard_compile_args(config))
    return shard_model
class LRChecker(tf.keras.callbacks.Callback):
    """Callback asserting the learning-rate schedule produced by the
    horovod warmup + step-decay callbacks in
    test_horovod_learning_rate_schedule (base lr 0.4, 5 warmup epochs,
    decade drops at epochs 10, 15 and 20)."""
    def __init__(self, *args):
        super(LRChecker, self).__init__(*args)
        # expected lr during the 5 linear-warmup epochs
        self.warmup_lr = [0.16, 0.22, 0.28, 0.34, 0.4]
    def on_epoch_end(self, epoch, logs=None):
        current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        print("epoch {} current lr is {}".format(epoch, current_lr))
        if epoch < 5:
            assert abs(current_lr - self.warmup_lr[epoch]) < 1e-5
        elif 5 <= epoch < 10:
            assert abs(current_lr - 0.4) < 1e-5
        elif 10 <= epoch < 15:
            assert abs(current_lr - 0.04) < 1e-5
        elif 15 <= epoch < 20:
            assert abs(current_lr - 0.004) < 1e-5
        else:
            assert abs(current_lr - 0.0004) < 1e-5
class TestTFRayEstimator(TestCase):
    """Integration tests for the Orca tf2/horovod Estimator on Ray.

    These tests require a running RayContext (and Spark for the
    DataFrame tests); they exercise fit/evaluate/predict over
    tf.data creators, SparkXShards, and Spark DataFrames.
    """
    def impl_test_fit_and_evaluate(self, backend):
        # Shared body for the tf2/horovod fit+evaluate sanity checks.
        import tensorflow as tf
        ray_ctx = RayContext.get()
        batch_size = 32
        global_batch_size = batch_size * ray_ctx.num_ray_nodes
        if backend == "horovod":
            trainer = Estimator.from_keras(
                model_creator=simple_model,
                compile_args_creator=compile_args,
                verbose=True,
                config=None,
                backend=backend)
        else:
            trainer = Estimator.from_keras(model_creator=model_creator,
                                           verbose=True,
                                           config=None,
                                           backend=backend,
                                           workers_per_node=2)
        # model baseline performance
        start_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
                                       num_steps=NUM_TEST_SAMPLES // global_batch_size)
        print(start_stats)
        def scheduler(epoch):
            if epoch < 2:
                return 0.001
            else:
                return 0.001 * tf.math.exp(0.1 * (2 - epoch))
        # rebinding `scheduler` to the callback shadows the function above
        scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
        # train for 2 epochs, twice
        trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
                    steps_per_epoch=10, callbacks=[scheduler])
        trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
                    steps_per_epoch=10, callbacks=[scheduler])
        # model performance after training (should improve)
        end_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
                                     num_steps=NUM_TEST_SAMPLES // global_batch_size)
        print(end_stats)
        # sanity check that training worked
        dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
        dmse = (end_stats["validation_mean_squared_error"] -
                start_stats["validation_mean_squared_error"])
        print(f"dLoss: {dloss}, dMSE: {dmse}")
        assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
    def test_fit_and_evaluate_tf(self):
        self.impl_test_fit_and_evaluate(backend="tf2")
    def test_fit_and_evaluate_horovod(self):
        self.impl_test_fit_and_evaluate(backend="horovod")
    def test_auto_shard_tf(self):
        # file 1 contains all 0s, file 2 contains all 1s
        # If shard by files, then each model will
        # see the same records in the same batch.
        # If shard by records, then each batch
        # will have different records.
        # The loss func is constructed such that
        # the former case will return 0, and the latter
        # case will return non-zero.
        ray_ctx = RayContext.get()
        trainer = Estimator.from_keras(
            model_creator=auto_shard_model_creator,
            verbose=True,
            backend="tf2", workers_per_node=2)
        stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
        assert stats["train_loss"] == 0.0
    def test_auto_shard_horovod(self):
        # file 1 contains all 0s, file 2 contains all 1s
        # If shard by files, then each model will
        # see the same records in the same batch.
        # If shard by records, then each batch
        # will have different records.
        # The loss func is constructed such that
        # the former case will return 0, and the latter
        # case will return non-zero.
        ray_ctx = RayContext.get()
        trainer = Estimator.from_keras(
            model_creator=create_auto_shard_model,
            compile_args_creator=create_auto_shard_compile_args,
            verbose=True,
            backend="horovod", workers_per_node=2)
        stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
        assert stats["train_loss"] == 0.0
    # this needs horovod >= 0.19.2
    def test_horovod_learning_rate_schedule(self):
        import horovod
        # manual semver check: run only on horovod >= 0.19.2
        major, minor, patch = horovod.__version__.split(".")
        larger_major = int(major) > 0
        larger_minor = int(major) == 0 and int(minor) > 19
        larger_patch = int(major) == 0 and int(minor) == 19 and int(patch) >= 2
        if larger_major or larger_minor or larger_patch:
            ray_ctx = RayContext.get()
            batch_size = 32
            workers_per_node = 4
            global_batch_size = batch_size * workers_per_node
            config = {
                "lr": 0.8
            }
            trainer = Estimator.from_keras(
                model_creator=simple_model,
                compile_args_creator=compile_args,
                verbose=True,
                config=config,
                backend="horovod", workers_per_node=workers_per_node)
            import horovod.tensorflow.keras as hvd
            # warmup to lr=0.4 over 5 epochs, then decade drops;
            # LRChecker asserts the resulting schedule
            callbacks = [
                hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, initial_lr=0.4,
                                                         verbose=True),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=10,
                                                           multiplier=1., initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=10, end_epoch=15,
                                                           multiplier=1e-1, initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=15, end_epoch=20,
                                                           multiplier=1e-2, initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=20, multiplier=1e-3,
                                                           initial_lr=0.4),
                LRChecker()
            ]
            for i in range(30):
                trainer.fit(create_train_datasets, epochs=1, batch_size=global_batch_size,
                            callbacks=callbacks)
        else:
            # skip tests in horovod lower version
            pass
    def test_sparkxshards(self):
        train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
                                              "y": np.random.randint(0, 1, size=(100))})
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
        trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
    def test_dataframe(self):
        # fit/evaluate/predict over a Spark DataFrame of vectors.
        # NOTE(review): np.float is deprecated (removed in numpy>=1.24);
        # works only with the pinned older numpy.
        sc = init_nncontext()
        rdd = sc.range(0, 10)
        from pyspark.sql import SparkSession
        spark = SparkSession(sc)
        from pyspark.ml.linalg import DenseVector
        df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
                                int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
                    feature_cols=["feature"],
                    label_cols=["label"])
        trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                         label_cols=["label"])
        trainer.predict(df, feature_cols=["feature"]).collect()
    def test_dataframe_with_empty_partition(self):
        # same as test_dataframe, but the first partition is forced empty
        from zoo.orca import OrcaContext
        sc = OrcaContext.get_spark_context()
        rdd = sc.range(0, 10)
        rdd_with_empty = rdd.repartition(4).\
            mapPartitionsWithIndex(lambda idx, part: [] if idx == 0 else part)
        from pyspark.sql import SparkSession
        spark = SparkSession(sc)
        from pyspark.ml.linalg import DenseVector
        df = rdd_with_empty.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
                                           int(np.random.randint(0, 1, size=()))))\
            .toDF(["feature", "label"])
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
                    feature_cols=["feature"],
                    label_cols=["label"])
        trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                         label_cols=["label"])
        trainer.predict(df, feature_cols=["feature"]).collect()
    def test_pandas_dataframe(self):
        # two-input functional model fed from a pandas-backed XShards
        def model_creator(config):
            import tensorflow as tf
            input1 = tf.keras.layers.Input(shape=(1,))
            input2 = tf.keras.layers.Input(shape=(1,))
            concatenation = tf.concat([input1, input2], axis=-1)
            outputs = tf.keras.layers.Dense(units=1, activation='softmax')(concatenation)
            model = tf.keras.Model(inputs=[input1, input2], outputs=outputs)
            model.compile(**compile_args(config))
            return model
        file_path = os.path.join(resource_path, "orca/learn/ncf2.csv")
        train_data_shard = zoo.orca.data.pandas.read_csv(file_path)
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=1)
        trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25,
                    feature_cols=["user", "item"],
                    label_cols=["label"])
        trainer.evaluate(train_data_shard, batch_size=4, num_steps=25,
                         feature_cols=["user", "item"], label_cols=["label"])
        trainer.predict(train_data_shard, feature_cols=["user", "item"]).collect()
    def test_dataframe_shard_size(self):
        # same flow as test_dataframe with an explicit global shard size.
        # NOTE(review): OrcaContext._shard_size is mutated globally and
        # never restored; later tests in the session see the new value.
        from zoo.orca import OrcaContext
        OrcaContext._shard_size = 3
        sc = init_nncontext()
        rdd = sc.range(0, 10)
        from pyspark.sql import SparkSession
        spark = SparkSession(sc)
        from pyspark.ml.linalg import DenseVector
        df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
                                int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
                    feature_cols=["feature"],
                    label_cols=["label"])
        trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                         label_cols=["label"])
        trainer.predict(df, feature_cols=["feature"]).collect()
    def test_partition_num_less_than_workers(self):
        # ensure the estimator repartitions when workers > partitions
        sc = init_nncontext()
        rdd = sc.range(200, numSlices=1)
        assert rdd.getNumPartitions() == 1
        from pyspark.sql import SparkSession
        spark = SparkSession(sc)
        from pyspark.ml.linalg import DenseVector
        df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
                                int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        assert df.rdd.getNumPartitions() < trainer.num_workers
        trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
                    feature_cols=["feature"],
                    label_cols=["label"])
        trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
                         label_cols=["label"])
        trainer.predict(df, feature_cols=["feature"]).collect()
    def test_dataframe_predict(self):
        # predict through the identity model must reproduce the features
        sc = init_nncontext()
        rdd = sc.parallelize(range(20))
        df = rdd.map(lambda x: ([float(x)] * 5,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        estimator = Estimator.from_keras(
            model_creator=identity_model_creator,
            verbose=True,
            config={},
            workers_per_node=2)
        result = estimator.predict(df, batch_size=4,
                                   feature_cols=["feature"])
        # error counts rows where prediction differs from the input feature
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result.selectExpr(expr).first()["error"] == 0
    def test_sparkxshards_with_inbalanced_data(self):
        # shards of randomly repeated sizes exercise unbalanced partitions
        train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
                                              "y": np.random.randint(0, 1, size=(100))})
        def random_pad(data):
            import numpy as np
            import random
            times = random.randint(1, 10)
            data["x"] = np.concatenate([data["x"]] * times)
            data["y"] = np.concatenate([data["y"]] * times)
            return data
        train_data_shard = train_data_shard.transform_shard(random_pad)
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
        trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
    def test_predict_xshards(self):
        # identity model: predictions must equal the original "x" arrays
        train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
                                              "y": np.random.randint(0, 1, size=(100,))})
        expected = train_data_shard.collect()
        expected = [shard["x"] for shard in expected]
        for x in expected:
            print(x.shape)
        expected = np.concatenate(expected)
        config = {
        }
        trainer = Estimator.from_keras(
            model_creator=identity_model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        result_shards = trainer.predict(train_data_shard, batch_size=10).collect()
        result = [shard["prediction"] for shard in result_shards]
        # NOTE(review): `expected_result` below is never used (and assumes
        # predict output shards still carry "x") -- dead code, candidate
        # for removal.
        expected_result = [shard["x"] for shard in result_shards]
        result = np.concatenate(result)
        assert np.allclose(expected, result)
    def test_save_and_load(self):
        # round-trip a small CNN through Estimator.save / Estimator.load
        def model_creator(config):
            import tensorflow as tf
            model = tf.keras.Sequential([
                tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                       padding='valid'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
                tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                       padding='valid'),
                tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(10, activation='softmax')]
            )
            model.compile(optimizer=tf.keras.optimizers.RMSprop(),
                          loss='sparse_categorical_crossentropy',
                          metrics=['accuracy'])
            return model
        def train_data_creator(config, batch_size):
            dataset = tf.data.Dataset.from_tensor_slices((np.random.randn(100, 28, 28, 3),
                                                          np.random.randint(0, 10, (100, 1))))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(1000)
            dataset = dataset.batch(batch_size)
            return dataset
        batch_size = 320
        try:
            est = Estimator.from_keras(model_creator=model_creator, workers_per_node=2)
            history = est.fit(train_data_creator,
                              epochs=1,
                              batch_size=batch_size,
                              steps_per_epoch=5)
            print("start saving")
            est.save("/tmp/cifar10_keras.ckpt")
            est.load("/tmp/cifar10_keras.ckpt")
            print("save success")
        finally:
            # NOTE(review): os.remove fails if save() produced a directory
            # or multiple checkpoint files, or if save() itself raised
            # before creating the file -- TODO confirm checkpoint layout.
            os.remove("/tmp/cifar10_keras.ckpt")
if __name__ == "__main__":
    # Allow running this test module directly (delegates to pytest).
    pytest.main([__file__])
| apache-2.0 |
massmutual/scikit-learn | sklearn/externals/joblib/__init__.py | 72 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over an
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solution to alleviate this
issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
dariox2/CADL | session-5/s5p3-latent_space_arithmetic.py | 1 | 18992 |
#
# Session 5, part 3
#
print("Begin import...")
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
#from skimage import data # ERROR: Cannot load libmkl_def.so
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
print("Loading tensorflow...")
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, nb_utils
# dja
plt.style.use('bmh')
#import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(4, 4))
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
def wait(n):
    # Debug override (dja): `n` is intentionally ignored and the pause is
    # fixed at 3 s; the original per-call pause and the blocking-input
    # alternative are kept commented out below.
    #plt.pause(n)
    plt.pause(3)
    #input("(press enter)")
##
## Part 3 - Latent-Space Arithmetic
##
#
# Loading the Pre-Trained Model
#
# We're now going to work with a pre-trained VAEGAN model on the
# Celeb Net dataset. Let's load this model:
tf.reset_default_graph()
print("Import vaegan model...")
from libs import celeb_vaegan as CV
net = CV.get_celeb_vaegan_model()
# We'll load the graph_def contained inside this dictionary. It
# follows the same idea as the `inception`, `vgg16`, and `i2v`
# pretrained networks. It is a dictionary with the key `graph_def`
# defined, with the graph's pretrained network. It also includes
# `labels` and a `preprocess` key. We'll have to do one additional
# thing which is to turn off the random sampling from variational
# layer. This isn't really necessary but will ensure we get the same
# results each time we use the network. We'll use the `input_map`
# argument to do this. Don't worry if this doesn't make any sense, as
# we didn't cover the variational layer in any depth. Just know that
# this is removing a random process from the network so that it is
# completely deterministic. If we hadn't done this, we'd get slightly
# different results each time we used the network (which may even be
# desirable for your purposes).
sess = tf.Session()
g = tf.get_default_graph()
print("import graph_def...")
# map the variational sampler to zeros so decoding is deterministic
tf.import_graph_def(net['graph_def'], name='net', input_map={
        'encoder/variational/random_normal:0': np.zeros(512, dtype=np.float32)})
#for op in g.get_operations():
#    print(op.name)
# Now let's get the relevant parts of the network: `X`, the input
# image to the network, `Z`, the input image's encoding, and `G`, the
# decoded image. In many ways, this is just like the Autoencoders we
# learned about in Session 3, except instead of `Y` being the output,
# we have `G` from our generator! And the way we train it is very
# different: we use an adversarial process between the generator and
# discriminator, and use the discriminator's own distance measure to
# help train the network, rather than pixel-to-pixel differences.
X = g.get_tensor_by_name('net/x:0')
Z = g.get_tensor_by_name('net/encoder/variational/z:0')
G = g.get_tensor_by_name('net/generator/x_tilde:0')
# Let's get some data to play with:
files = datasets.CELEB()
img_i = 50
img = plt.imread(files[img_i])
plt.imshow(img)
plt.title("some celeb")
wait(1)
# Now preprocess the image, and see what the generated image looks
# like (i.e. the lossy version of the image through the network's
# encoding and decoding).
p = CV.preprocess(img)
synth = sess.run(G, feed_dict={X: p[np.newaxis]})
#fig, axs = plt.subplots(1, 2, figsize=(10, 5))
#axs[0].imshow(p)
plt.imshow(synth[0] / synth.max())
plt.title("lossy version")
wait(1)
# So we lost a lot of details but it seems to be able to express
# quite a bit about the image. Our inner most layer, `Z`, is only 512
# values yet our dataset was 200k images of 64 x 64 x 3 pixels (about
# 2.3 GB of information). That means we're able to express our nearly
# 2.3 GB of information with only 512 values! Having some loss of
# detail is certainly expected!
#
# <a name="exploring-the-celeb-net-attributes"></a>
# ## Exploring the Celeb Net Attributes
#
# Let's now try and explore the attributes of our dataset. We didn't
# train the network with any supervised labels, but the Celeb Net
# dataset has 40 attributes for each of its 200k images. These are
# already parsed and stored for you in the `net` dictionary:
print("net keys: ", net.keys())
len(net['labels'])  # NOTE(review): result unused -- likely leftover from a notebook cell
print("net labels: ", net['labels'])
# Let's see what attributes exist for one of the celeb images:
plt.title("attributes")
plt.imshow(img)
print("attributes of ", img_i)
#[net['labels'][i] for i, attr_i in enumerate(net['attributes'][img_i]) if attr_i]
for i, attr_i in enumerate(net['attributes'][img_i]):
    if attr_i:
        print(i, net['labels'][i])
wait(1)
#
# Find the Latent Encoding for an Attribute
#
# The Celeb Dataset includes attributes for each of its 200k+ images.
# This allows us to feed into the encoder some images that we know
# have a *specific* attribute, e.g. "smiling". We store what their
# encoding is and retain this distribution of encoded values. We can
# then look at any other image and see how it is encoded, and
# slightly change the encoding by adding the encoded of our smiling
# images to it! The result should be our image but with more smiling.
# That is just insane and we're going to see how to do it. First lets
# inspect our latent space:
print("Z shape: ", Z.get_shape())
# We have 512 features that we can encode any image with. Assuming
# our network is doing an okay job, let's try to find the `Z` of the
# first 100 images with the 'Bald' attribute:
bald_label = net['labels'].index('Bald')
print("bald_label: ", bald_label)
# Let's get all the bald image indexes:
bald_img_idxs = np.where(net['attributes'][:, bald_label])[0]
print("bald img idxs: ", bald_img_idxs)
print("bald idxs len: ", len(bald_img_idxs))
# Now let's just load 100 of their images:
print("bald #100: ", bald_img_idxs[99])
# keep only the first 3 channels in case of RGBA images
bald_imgs = [plt.imread(files[bald_img_i])[..., :3]
             for bald_img_i in bald_img_idxs[:100]]
print("bald imgs len: ", len(bald_imgs))
# Let's see if the mean image looks like a good bald person or not:
plt.title("bald person")
plt.imshow(np.mean(bald_imgs, 0).astype(np.uint8))
wait(1)
# Yes that is definitely a bald person. Now we're going to try to
# find the encoding of a bald person. One method is to try and find
# every other possible image and subtract the "bald" person's latent
# encoding. Then we could add this encoding back to any new image and
# hopefully it makes the image look more bald. Or we can find a bunch
# of bald people's encodings and then average their encodings
# together. This should reduce the noise from having many different
# attributes, but keep the signal pertaining to the baldness.
#
# Let's first preprocess the images:
bald_p = np.array([CV.preprocess(bald_img_i) for bald_img_i in bald_imgs])
# Now we can find the latent encoding of the images by calculating
# `Z` and feeding `X` with our `bald_p` images:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
bald_zs = sess.run(Z, feed_dict={X: bald_p}) # dja
# Now let's calculate the mean encoding:
bald_feature = np.mean(bald_zs, 0, keepdims=True)
print("bald feature shape: ", bald_feature.shape)
# Let's try and synthesize from the mean bald feature now and see how
# it looks:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
bald_generated = sess.run(G, feed_dict={Z: bald_feature}) # dja
plt.title("bald generated")
plt.imshow(bald_generated[0] / bald_generated.max())
wait(1)
#
# Latent Feature Arithmetic
#
# Let's now try to write a general function for performing everything
# we've just done so that we can do this with many different
# features. We'll then try to combine them and synthesize people with
# the features we want them to have...
def get_features_for(label='Bald', has_label=True, n_imgs=50):
    """Return the mean latent encoding for images with (or without) a label.

    Picks ``n_imgs`` random celeb images whose attribute ``label`` matches
    ``has_label``, preprocesses them, encodes them with the VAE-GAN, and
    averages the resulting latent vectors.
    """
    attr_col = net['labels'].index(label)
    matching = np.where(net['attributes'][:, attr_col] == has_label)[0]
    chosen = np.random.permutation(matching)[:n_imgs]
    # Load each selected image, keeping only the RGB channels.
    loaded = []
    for idx in chosen:
        loaded.append(plt.imread(files[idx])[..., :3])
    batch = np.array([CV.preprocess(im) for im in loaded])
    encodings = sess.run(Z, feed_dict={X: batch})
    return np.mean(encodings, 0)
# Let's try getting some attributes positive and negative features.
# Be sure to explore different attributes! Also try different values
# of `n_imgs`, e.g. 2, 3, 5, 10, 50, 100. What happens with different
# values?
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Explore different attributes
z1 = get_features_for('Male', True, n_imgs=10)
z2 = get_features_for('Male', False, n_imgs=10)
z3 = get_features_for('Smiling', True, n_imgs=10)
z4 = get_features_for('Smiling', False, n_imgs=10)
# Decode each mean attribute vector back to an image.
b1 = sess.run(G, feed_dict={Z: z1[np.newaxis]})
b2 = sess.run(G, feed_dict={Z: z2[np.newaxis]})
b3 = sess.run(G, feed_dict={Z: z3[np.newaxis]})
b4 = sess.run(G, feed_dict={Z: z4[np.newaxis]})
plt.close()
# 5 axes are created but only 4 are filled here; the 5th is reused by
# the interpolation plots below.
fig, axs = plt.subplots(1, 5, figsize=(9, 4))
plt.suptitle("male / not male / smile / not smile")
axs[0].imshow(b1[0] / b1.max()), axs[0].grid('off'), axs[0].axis('off')
axs[1].imshow(b2[0] / b2.max()), axs[1].grid('off'), axs[1].axis('off')
axs[2].imshow(b3[0] / b3.max()), axs[2].grid('off'), axs[2].axis('off')
axs[3].imshow(b4[0] / b4.max()), axs[3].grid('off'), axs[3].axis('off')
wait(1)
plt.cla()
# Now let's interpolate between the "Male" and "Not Male" categories:
notmale_vector = z2 - z1
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z1 + notmale_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
plt.suptitle("male ... not male")
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
wait(1)
plt.cla()
# And the same for smiling:
smiling_vector = z3 - z4
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
plt.suptitle("not smile ... smile")
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
wait(1)
plt.cla()
# There's also no reason why we have to be within the boundaries of
# 0-1. We can extrapolate beyond, in, and around the space.
plt.suptitle("extrapolate")
n_imgs = 5
amt = np.linspace(-1.5, 2.5, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    #ax_i.grid('off')
    ax_i.axis('off')
wait(1)
plt.cla()
#
# Extensions
#
# [Tom White](https://twitter.com/dribnet), Lecturer at Victoria
# University School of Design, also recently demonstrated an
# alternative way of interpolating using a sinusoidal interpolation.
# He's created some of the most impressive generative images out
# there and luckily for us he has detailed his process in the arxiv
# preprint: https://arxiv.org/abs/1609.04468 - as well, be sure to
# check out his twitter bot, https://twitter.com/smilevector - which
# adds smiles to people :) - Note that the network we're using is
# only trained on aligned faces that are frontally facing, though
# this twitter bot is capable of adding smiles to any face. I suspect
# that he is running a face detection algorithm such as AAM, CLM, or
# ASM, cropping the face, aligning it, and then running a similar
# algorithm to what we've done above. Or else, perhaps he has trained
# a new model on faces that are not aligned. In any case, it is well
# worth checking out!
#
# Let's now try and use sinusoidal interpolation using his
# implementation in
# [plat](https://github.com/dribnet/plat/blob/master/plat/interpolate.py#L16-L24)
# which I've copied below:
def slerp(val, low, high):
    """Spherical linear interpolation between vectors ``low`` and ``high``.

    Parameters
    ----------
    val : float
        Interpolation fraction; values outside [0, 1] return an endpoint.
    low, high : array_like
        Endpoint vectors (need not be unit length).

    Returns
    -------
    array_like
        ``low`` for ``val <= 0``, ``high`` for ``val >= 1``, otherwise the
        great-circle interpolation between the two directions.
    """
    if val <= 0:
        return low
    if val >= 1:
        return high
    # Clip the cosine into [-1, 1]: rounding error can push the dot product
    # of two normalized vectors just outside the domain of arccos (-> NaN).
    cos_omega = np.clip(
        np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)),
        -1.0, 1.0)
    omega = np.arccos(cos_omega)
    so = np.sin(omega)
    if so == 0:
        # (Anti)parallel endpoints: slerp is undefined (division by zero);
        # fall back to plain linear interpolation.
        return (1.0 - val) * low + val * high
    return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high
plt.suptitle("sinusoidal interp")
amt = np.linspace(0, 1, n_imgs)
zs = np.array([slerp(amt_i, z1, z2) for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# It's certainly worth trying especially if you are looking to
# explore your own model's latent space in new and interesting ways.
#
# Let's try and load an image that we want to play with. We need an
# image as similar to the Celeb Dataset as possible. Unfortunately,
# we don't have access to the algorithm they used to "align" the
# faces, so we'll need to try and get as close as possible to an
# aligned face image. One way you can do this is to load up one of
# the celeb images and try and align an image to it using e.g.
# Photoshop or another photo editing software that lets you blend and
# move the images around. That's what I did for my own face...
img = plt.imread('parag.png')[..., :3]
img = CV.preprocess(img, crop_factor=1.0)[np.newaxis]
# Let's see how the network encodes it:
plt.suptitle("blurry Parag")
img_ = sess.run(G, feed_dict={X: img})
#fig, axs = plt.subplots(1, 2, figsize=(10, 5))
plt.cla()
for i, ax_i in enumerate(axs):
ax_i.cla()
ax_i.grid('off')
ax_i.axis('off')
axs[0].imshow(img[0])
axs[1].imshow(np.clip(img_[0] / np.max(img_), 0, 1))
wait(1)
plt.cla()
# Notice how blurry the image is. Tom White's preprint suggests one
# way to sharpen the image is to find the "Blurry" attribute vector:
z1 = get_features_for('Blurry', True, n_imgs=25)
z2 = get_features_for('Blurry', False, n_imgs=25)
unblur_vector = z2 - z1
z = sess.run(Z, feed_dict={X: img})
plt.suptitle("unblur vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Notice that the image also gets brighter and perhaps other features
# than simply the bluriness of the image changes. Tom's preprint
# suggests that this is due to the correlation that blurred images
# have with other things such as the brightness of the image,
# possibly due biases in labeling or how photographs are taken. He
# suggests that another way to unblur would be to synthetically blur
# a set of images and find the difference in the encoding between the
# real and blurred images. We can try it like so:
from scipy.ndimage import gaussian_filter
idxs = np.random.permutation(range(len(files)))
imgs = [plt.imread(files[idx_i]) for idx_i in idxs[:100]]
blurred = []
for img_i in imgs:
img_copy = np.zeros_like(img_i)
for ch_i in range(3):
img_copy[..., ch_i] = gaussian_filter(img_i[..., ch_i], sigma=3.0)
blurred.append(img_copy)
# Now let's preprocess the original images and the blurred ones
imgs_p = np.array([CV.preprocess(img_i) for img_i in imgs])
blur_p = np.array([CV.preprocess(img_i) for img_i in blurred])
# And then compute each of their latent features
noblur = sess.run(Z, feed_dict={X: imgs_p})
blur = sess.run(Z, feed_dict={X: blur_p})
synthetic_unblur_vector = np.mean(noblur - blur, 0)
plt.suptitle("synthetic unblur vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + synthetic_unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# For some reason, it also doesn't like my glasses very much. Let's
# try and add them back.
z1 = get_features_for('Eyeglasses', True)
z2 = get_features_for('Eyeglasses', False)
glass_vector = z1 - z2
z = sess.run(Z, feed_dict={X: img})
plt.suptitle("glass vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Well, more like sunglasses then. Let's try adding everything in
# there now!
plt.suptitle("everything")
n_imgs = 5
amt = np.linspace(0, 1.0, n_imgs)
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i + amt_i * smiling_vector for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Well it was worth a try anyway. We can also try with a lot of
# images and create a gif montage of the result:
print("creating montage...")
n_imgs = 5
amt = np.linspace(0, 1.5, n_imgs)
z = sess.run(Z, feed_dict={X: imgs_p})
imgs = []
for amt_i in amt:
zs = z + synthetic_unblur_vector * amt_i + amt_i * smiling_vector
g = sess.run(G, feed_dict={Z: zs})
m = utils.montage(np.clip(g, 0, 1))
imgs.append(m)
gif.build_gif(imgs, saveto='celeb_unblur_smile.gif', interval=0.2)
#ipyd.Image(url='celeb.gif?i={}'.format(np.random.rand()), height=1000, width=1000)
# Exploring multiple feature vectors and applying them to images from
# the celeb dataset to produce animations of a face, saving it as a
# GIF. Recall you can store each image frame in a list and then use
# the `gif.build_gif` function to create a gif. Explore your own
# syntheses and then include a gif of the different images you create
# as "celeb.gif" in the final submission. Perhaps try finding
# unexpected synthetic latent attributes in the same way that we
# created a blur attribute. You can check the documentation in
# scipy.ndimage for some other image processing techniques, for
# instance: http://www.scipy-lectures.org/advanced/image_processing/
# - and see if you can find the encoding of another attribute that
# you then apply to your own images. You can even try it with many
# images and use the `utils.montage` function to create a large grid
# of images that evolves over your attributes. Or create a set of
# expressions perhaps. Up to you just explore!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
#... DO SOMETHING AWESOME ! ... #
#dja
#imgs = []
#gif.build_gif(imgs=imgs, saveto='vaegan.gif')
wait(1)
# Please visit [session-5-part2.ipynb](session-5-part2.ipynb) for the
# rest of the homework!
# eop
| apache-2.0 |
justincassidy/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    """Bundle describing one delegating meta-estimator under test.

    ``construct`` wraps a sub-estimator in the meta-estimator;
    ``skip_methods`` lists delegated methods the meta-estimator is known
    not to expose.  Note: ``fit_args`` defaults to a dataset generated
    once at class-definition time, so instances created without an
    explicit ``fit_args`` share the same arrays.
    """

    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        self.skip_methods = skip_methods
        self.fit_args = fit_args
        self.construct = construct
        self.name = name
# Meta-estimators that delegate attribute access to a wrapped estimator,
# together with the delegated methods each one is known not to expose.
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Turn `method` into a property that raises AttributeError when the
        # instance's hidden_method matches -- simulating an estimator that
        # lacks that method, so hasattr() returns False.
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper

    class SubEstimator(BaseEstimator):
        # Minimal estimator whose delegated methods can be selectively hidden.
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method

        def fit(self, X, y=None, *args, **kwargs):
            # NOTE(review): returns True rather than self (the usual sklearn
            # convention) -- sufficient for this test, which never chains fit.
            self.coef_ = np.arange(X.shape[1])
            return True

        def _check_fit(self):
            # Raise if a delegated method is called before fit.
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')

        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X

        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])

        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0

    # All public delegatable methods of SubEstimator (everything except
    # fit and private helpers), in deterministic order.
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()

    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])

        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])

        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # When the sub-estimator hides a method, the meta-estimator must
            # not expose it either.
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
| bsd-3-clause |
seanpquinn/augerta | web_monitor/unhex_and_sort_mar2017_v4_catchup.py | 1 | 24486 | # Copyright (c) Case Western Reserve University 2017
# This software is distributed under Apache License 2.0
# Consult the file LICENSE.txt
# Author: Sean Quinn spq@case.edu
# Mar 23 2017
import binascii
import bz2
import struct
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import subprocess as sp
import time
import sys
import scipy.signal as spysig
"""Small Python3 script that parses the master T3.out list. The file contains
all the relevant T3 data: Event ID, GPS time, Trigger type, and FADC traces for the
3 PMTs. Ricardo Sato has written a program "x2" which recovers the trace data
only from a decompressed T3 message. This script performs the following function
1.) Isolates individual events in the master list
2.) Converts ASCII hex data to raw binary
2.) Decompresses (bz2 format) T3 data message (still raw binary)
3.) Creates a folder for individual events with following name format
GPSTIME_MICROSECOND
Example: 1117640005_616863
4.) Places output of x2 program, which is an ASCII text file containing the
PMT traces (presumably mV levels for dynode/anode?)
"""
def rle(inarray):
    """Run-length encode *inarray* (R-style ``rle``).

    Accepts any array-like, including non-numpy sequences and strings.

    Returns
    -------
    tuple
        ``(runlengths, startpositions, values)`` as numpy arrays, or
        ``(None, None, None)`` for empty input.
    """
    arr = np.array(inarray)                       # force numpy
    if len(arr) == 0:
        return (None, None, None)
    # True wherever a new run begins (element differs from its predecessor).
    change = np.array(arr[1:] != arr[:-1])
    # Inclusive end index of every run; the last element always ends a run.
    run_ends = np.append(np.where(change), len(arr) - 1)
    lengths = np.diff(np.append(-1, run_ends))    # run lengths
    starts = np.cumsum(np.append(0, lengths))[:-1]  # run start positions
    return (lengths, starts, arr[run_ends])
def save_calib(bytes):
    """Parse the calibration block of a decompressed T3 message and write
    calib_info.txt in the current directory.

    The calibration block sits at a fixed offset: a 32-bit size word at
    byte 8212 (expected to be 0, with the real size at 8216), followed by
    the calibration fields from byte 8220 on.  Valid sizes are 84 or 104
    bytes; anything else writes "BAD COMPRESS" instead.

    Returns
    -------
    calsize : int
        The decoded size field (84 or 104 for a valid block).
    """
    # NOTE(review): the parameter shadows the builtin `bytes`; left as-is
    # to keep the interface unchanged.
    f = open('calib_info.txt','w')
    calsize = struct.unpack('>I',bytes[8212:8216])[0]
    if calsize == 0:
        # Leading zero word: the actual size field is the next 32-bit word.
        calsize = struct.unpack('>I',bytes[8216:8220])[0]
        if calsize == 84 or calsize == 104:
            f.write('Version {}\n'.format(struct.unpack('>H',bytes[8220:8222])[0]))
            f.write('TubeMask {}\n'.format(struct.unpack('>H',bytes[8222:8224])[0]))
            f.write('StartSecond {}\n'.format(struct.unpack('>I',bytes[8224:8228])[0]))
            f.write('EndSecond {}\n'.format(struct.unpack('>I',bytes[8228:8232])[0]))
            f.write('NbT1 {}\n'.format(struct.unpack('>H',bytes[8232:8234])[0]))
            f.write('NbT2 {}\n'.format(struct.unpack('>H',bytes[8234:8236])[0]))
            evol = [0,0,0] # Last 8 minutes of calibration evolution
            for i in range(3):
                evol[i] = struct.unpack('>H',bytes[8236+2*i:8236+2*(i+1)])[0]
            f.write('Evolution {0} {1} {2}\n'.format(*evol))
            # Finish at 8242
            # Per-PMT baselines and variances, stored as 0.01-channel units.
            dynode_base = [0,0,0]
            for i in range(3):
                dynode_base[i] = struct.unpack('>H',bytes[8242+2*i:8242+2*(i+1)])[0]*0.01
            f.write('Dynode Base {0:.3f} {1:.3f} {2:.3f}\n'.format(*dynode_base))
            # Finish at 8248
            anode_base = [0,0,0]
            for i in range(3):
                anode_base[i] = struct.unpack('>H',bytes[8248+2*i:8248+2*(i+1)])[0]*0.01
            f.write('Anode Base {0} {1} {2}\n'.format(*anode_base))
            #Finish at 8254
            dynode_base_var = [0,0,0]
            for i in range(3):
                dynode_base_var[i] = struct.unpack('>H',bytes[8254+2*i:8254+2*(i+1)])[0]*0.01
            f.write('Dynode Base Var {0} {1} {2}\n'.format(*dynode_base_var))
            # Finish at 8260
            anode_base_var = [0,0,0]
            for i in range(3):
                anode_base_var[i] = struct.unpack('>H',bytes[8260+2*i:8260+2*(i+1)])[0]*0.01
            f.write('Anode Base Var {0} {1} {2}\n'.format(*anode_base_var))
            #Finish at 8266
            # NOTE(review): this label list is never used below -- the labels
            # are written out explicitly instead.
            block = ['VemPeak','Rate','NbTDA','DA','SigmaDA','VemCharge']
            vem_peak = [0,0,0]
            for i in range(3):
                vem_peak[i] = struct.unpack('>H',bytes[8266+2*i:8266+2*(i+1)])[0]*0.1
            f.write('VemPeak ' + '{0} {1} {2}\n'.format(*vem_peak))
            # Finish at 8272
            rate70Hz = [0,0,0]
            for i in range(3):
                rate70Hz[i] = struct.unpack('>H',bytes[8272+2*i:8272+2*(i+1)])[0]*0.01
            f.write('70 Hz Rate ' + '{0} {1} {2}\n'.format(*rate70Hz))
            # Finish at 8278
            trigger_DA = [0,0,0]
            for i in range(3):
                trigger_DA[i] = struct.unpack('>H',bytes[8278+2*i:8278+2*(i+1)])[0]
            f.write('Trigger D/A ' + '{0} {1} {2}\n'.format(*trigger_DA))
            # Finish at 8284
            DA = [0,0,0]
            for i in range(3):
                DA[i] = struct.unpack('>H',bytes[8284+2*i:8284+2*(i+1)])[0]*0.01
            f.write('D/A ' + '{0} {1} {2}\n'.format(*DA))
            # Finish at 8290
            DA_var = [0,0,0]
            for i in range(3):
                DA_var[i] = struct.unpack('>H',bytes[8290+2*i:8290+2*(i+1)])[0]*0.01
            f.write('D/A var ' + '{0} {1} {2}\n'.format(*DA_var))
            # Finish at 8296
            Area = [0,0,0]
            for i in range(3):
                Area[i] = struct.unpack('>H',bytes[8296+2*i:8296+2*(i+1)])[0]*0.1
            f.write('VemCharge ' + '{0} {1} {2}\n'.format(*Area))
            # Finish at 8302
            totRate = struct.unpack('>H',bytes[8302:8304])[0]*0.01
            f.write('TotRate ' + '{0}\n'.format(totRate))
            #Finish at 8304
            # NOTE(review): NbTOT re-reads bytes[8302:8304], the same word as
            # TotRate above -- presumably this should be bytes[8304:8306];
            # TODO confirm against the station calibration format spec.
            f.write('NbTOT {}\n'.format(struct.unpack('>H',bytes[8302:8304])[0]))
            if calsize == 104:
                # Extended (104-byte) block: three extra 3-value rows.
                # NOTE(review): these re-read starting at offset 8266, which
                # the VemPeak row above also used -- verify the intended
                # offsets against the format spec.
                block = ['DADt','SigmaDADt','DAChi2']
                for i in range(3):
                    vals = [0,0,0]
                    for j in range(3):
                        vals[j]=struct.unpack('>H',bytes[8266+2*(3*i+j):8266+2*(3*i+j+1)])[0]/100.
                    f.write(block[i]+' '+'{0} {1} {2}\n'.format(*vals))
        else:
            f.write("BAD COMPRESS\n")
            f.close()
    else:
        f.write("BAD COMPRESS\n")
        f.close()
    # Harmless second close on the error paths; first close on success.
    f.close()
    return calsize
def save_mon(bytes,si):
    """Parse the monitoring-histogram block of a T3 message and write the
    baseline / pulse-height / charge / shape histograms (text + PNG) to
    the current directory.

    Parameters
    ----------
    bytes : bytes
        Raw decompressed T3 message (parameter shadows the builtin name).
    si : int
        Start index of the monitoring block relative to byte 8220.

    Returns
    -------
    int
        Byte offset just past the parsed block on success; 0 when the block
        is absent (size 0); the bad size value otherwise.
        NOTE(review): callers must distinguish these by magnitude --
        presumably intentional, but worth confirming.
    """
    #si is fondly known as start index
    si += 8220
    mon_size = struct.unpack('>I',bytes[si:si+4])[0]
    si += 4
    if mon_size == 6080:
        # Ten 16-bit axis offsets: 3 baseline, 3 pulse-height, 4 charge.
        f = open('mon_hist_offset.txt','w')
        offsets = np.zeros(10,dtype=int)
        for i in range(10):
            val = struct.unpack('>H',bytes[si+2*i:si+2*(i+1)])[0]
            offsets[i] = val
            f.write('{}\n'.format(val))
        si += 2 * 10
        f.close()
        # -------------BASELINE HISTOGRAMS-------------
        # Columns alternate (bin value, count) for PMTs 1-3.
        pmt_base = np.zeros((20,6),dtype=int)
        pmt_base[:,0] = np.arange(offsets[0],offsets[0]+20)
        pmt_base[:,2] = np.arange(offsets[1],offsets[1]+20)
        pmt_base[:,4] = np.arange(offsets[2],offsets[2]+20)
        tmp_labels=['','PMT 1','','PMT 2','','PMT 3']
        for j in [1,3,5]:
            for i in range(20):
                pmt_base[i,j] = struct.unpack('>H',bytes[si+2*i:si+2*(i+1)])[0]
            si += 2*20
            plt.step(pmt_base[:,j-1],pmt_base[:,j],where='pre',label=tmp_labels[j])
        plt.xlabel('FADC channels')
        plt.ylabel('Counts')
        plt.title('Baseline histograms')
        plt.legend()
        plt.savefig('mon_hist_base.png')
        plt.close('all')
        np.savetxt('mon_hist_base.txt',pmt_base,fmt="%i")
        # -------------PULSE HEIGHT HISTOGRAMS-------------
        mon_peak = np.zeros((150,6),dtype=int)
        mon_peak[:,0] = np.arange(offsets[3],offsets[3]+150)
        mon_peak[:,2] = np.arange(offsets[4],offsets[4]+150)
        mon_peak[:,4] = np.arange(offsets[5],offsets[5]+150)
        tmp_labels=['','PMT 1','','PMT 2','','PMT 3']
        for j in [1,3,5]:
            for i in range(150):
                mon_peak[i,j] = struct.unpack('>H',bytes[si+2*i:si+2*(i+1)])[0]
            si += 2*150
            lab = tmp_labels[j]
            plt.step(mon_peak[:,j-1],mon_peak[:,j],where='pre',label=lab)
        plt.xlabel('FADC channels')
        plt.ylabel('Counts')
        plt.title('Pulse height histograms')
        plt.legend()
        plt.savefig('mon_hist_pulse_height.png')
        plt.close('all')
        np.savetxt('mon_hist_pulse_height.txt',mon_peak,fmt="%i")
        # -------------CHARGE HISTOGRAMS-------------
        # Fourth pair of columns (6,7) is the PMT-sum histogram, plotted in
        # its own figure below.
        mon_charge = np.zeros((600,8),dtype=int)
        mon_charge[:,0] = np.arange(offsets[6],offsets[6]+600)
        mon_charge[:,2] = np.arange(offsets[7],offsets[7]+600)
        mon_charge[:,4] = np.arange(offsets[8],offsets[8]+600)
        mon_charge[:,6] = np.arange(offsets[9],offsets[9]+600)
        tmp_labels=['','PMT 1','','PMT 2','','PMT 3','','PMT SUM']
        for j in [1,3,5,7]:
            for i in range(600):
                mon_charge[i,j] = struct.unpack('>H',bytes[si+2*i:si+2*(i+1)])[0]
            si += 2*600
            lab = tmp_labels[j]
            if j != 7:
                plt.step(mon_charge[:,j-1],mon_charge[:,j],where='pre',label=lab)
        plt.xlabel('FADC channels')
        plt.ylabel('Counts')
        plt.title('Charge histograms')
        plt.legend()
        plt.savefig('mon_hist_charge.png')
        plt.close('all')
        np.savetxt('mon_hist_charge.txt',mon_charge,fmt="%i")
        plt.step(mon_charge[:,6],mon_charge[:,7],where='pre',label=tmp_labels[-1])
        plt.xlabel('FADC channels')
        plt.ylabel('Counts')
        plt.title('Sum charge histogram')
        plt.legend()
        plt.savefig('mon_hist_charge_sum.png')
        plt.close('all')
        # -------------SHAPE HISTOGRAMS-------------
        # 20 bins of 25 ns each; counts are 32-bit here, unlike the rest.
        mon_shape = np.zeros((20,4),dtype=int)
        mon_shape[:,0] = np.arange(0,500,25)
        for j in range(1,4):
            for i in range(20):
                mon_shape[i,j] = struct.unpack('>I',bytes[si+4*i:si+4*(i+1)])[0]
            si += 4*20
            plt.step(mon_shape[:,0],mon_shape[:,j],where='pre',label='PMT %i' %j)
        plt.xlabel('FADC bins [25 ns]')
        plt.ylabel('Counts')
        plt.title('PMT Shape')
        plt.legend()
        plt.savefig('mon_hist_pmt_shape.png')
        plt.close('all')
        np.savetxt('mon_hist_pmt_shape.txt',mon_shape,fmt="%i")
    elif mon_size == 0:
        return 0
    else:
        # Unexpected size: leave an empty marker file for the operator.
        f = open('BAD_MON_COMPRESS','w')
        f.close()
        return mon_size
    return si
def save_gps(bytes, si):
    """Decode the GPS block starting at offset ``si`` and write gps_info.txt.

    Seven big-endian 32-bit counters are always written; a trailing 16-bit
    offset value is appended only when the block's size field equals 30.
    """
    gpssize = struct.unpack('>I', bytes[si:si + 4])[0]
    si += 4
    labels = ['Current100','Next100','Current40','Next40','PreviousST',
              'CurrentST','NextST']
    out = open('gps_info.txt', 'w')
    for idx, name in enumerate(labels):
        word_start = si + 4 * idx
        word = struct.unpack('>I', bytes[word_start:word_start + 4])[0]
        out.write(name + ' ' + '{}\n'.format(word))
    si += 4 * 7
    if gpssize == 30:
        tail = struct.unpack('>H', bytes[si:si + 2])[0]
        out.write('Offset {}'.format(tail))
    out.close()
def find_baseline(x,y):
    """Determine baseline from ADC trace.
    Looks at a subsample (0-125) of pretrigger ADC counts.
    The bin count with the largest weight is taken as the baseline.
    Algorithm is based on GAP2016_044.

    Parameters
    ----------
    x,y : array_like
        Dynode and anode channel arrays (768 samples each). Converts to int.

    Returns
    -------
    a,b : array_like
        The baseline values for dynode and anode, respectively
    c,d,e,f : scalars
        Start bins for dynode and anode, stop bins for dynode and anode
    """
    dyn = x.astype(int)
    ano = y.astype(int)
    # NOTE(review): `sigma` is never used below -- presumably a leftover
    # tolerance parameter (the hard-coded "< 2" plays that role).
    sigma = 2
    #Find high gain baseline pieces
    dyn_b = np.zeros(768)
    #Determine most likely baseline
    # Candidate baselines: the 5 lowest ADC values; pick the most frequent.
    binval = np.arange(dyn.min(),dyn.min()+5)
    counts = np.array([len(dyn[dyn==i]) for i in binval])
    likely_base = binval[counts.argmax()]
    # Mark samples within 2 counts of the likely baseline.
    for i in range(768):
        if abs(dyn[i] - likely_base) < 2:
            dyn_b[i] = 1
    # Run-length encode the mask; keep only the runs of baseline samples.
    num_vals,start,val = rle(dyn_b)
    base_i = np.where(val==1)[0]
    num_vals,start=num_vals[base_i],start[base_i]
    n_pieces = len(num_vals)
    # Replace each sufficiently long (>10 samples) baseline run with its
    # mean; short runs are zeroed and filled by interpolation below.
    for i in range(n_pieces):
        delta = num_vals[i]
        if delta > 10:
            base_mean = dyn[start[i]:start[i]+num_vals[i]].mean()
            dyn_b[start[i]:start[i]+num_vals[i]] = base_mean
        else:
            dyn_b[start[i]:start[i]+num_vals[i]] = 0
    #Interpolate between pieces
    zeros = np.where(dyn_b == 0.)[0]
    logical = np.zeros(768,dtype=bool)
    logical[zeros] = True
    tz = lambda z: z.nonzero()[0]
    #Interp might fail in some situations
    try:
        dyn_b[logical] = np.interp(tz(logical),tz(~logical),dyn_b[~logical])
    except:
        # NOTE(review): bare except; falls back to the value near the end of
        # the trace (bin 760) when interpolation has no anchor points.
        if len(zeros) > 0:
            dyn_b[logical] = dyn_b[760]
    #Signal start search
    dyn2 = dyn-dyn_b
    dyn_start = 150 #Default in case problems
    # Start = first pair of consecutive bins >10 counts above baseline,
    # searched from bin 100 onward; step back 2 bins for a margin.
    for i in range(100,768-1):
        w0 = dyn2[i]
        w1 = dyn2[i+1]
        if w0 > 10 and w1 > 10:
            dyn_start = i - 2
            break
    #Signal stop search
    dyn_finish = 350 #Default in case of problems
    #Don't care about spurious muons near end either
    # Scan backwards; the first bin (>4 counts, before bin 400) ends the
    # signal window, padded by 10 bins.
    for i in range(767,dyn_start,-1):
        w0 = dyn2[i]
        if w0 > 4 and i < 400:
            dyn_finish = i + 10
            break
    # ---- Same procedure for the anode (low-gain) channel ----
    ano_b = np.zeros(768)
    #Determine most likely baseline
    binval = np.arange(ano.min(),ano.min()+5)
    counts = np.array([len(ano[ano==i]) for i in binval])
    likely_base = binval[counts.argmax()]
    for i in range(768):
        if abs(ano[i] - likely_base) < 2:
            ano_b[i] = 1
    num_vals,start,val = rle(ano_b)
    base_i = np.where(val==1)[0]
    num_vals,start=num_vals[base_i],start[base_i]
    n_pieces = len(num_vals)
    for i in range(n_pieces):
        delta = num_vals[i]
        if delta > 10:
            base_mean = ano[start[i]:start[i]+num_vals[i]].mean()
            ano_b[start[i]:start[i]+num_vals[i]] = base_mean
        else:
            ano_b[start[i]:start[i]+num_vals[i]] = 0
    #Interpolate between pieces
    zeros = np.where(ano_b == 0.)[0]
    logical = np.zeros(768,dtype=bool)
    logical[zeros] = True
    tz = lambda z: z.nonzero()[0]
    #Interp might fail in some situations
    try:
        ano_b[logical] = np.interp(tz(logical),tz(~logical),ano_b[~logical])
    except:
        if len(zeros) > 0:
            ano_b[logical] = ano_b[760]
    #Signal start search
    ano2 = ano-ano_b
    ano_start = 150 #Default in case problems
    for i in range(100,768-1):
        w0 = ano2[i]
        w1 = ano2[i+1]
        if w0 > 10 and w1 > 10:
            ano_start = i - 2
            break
    #Signal stop search
    ano_finish = 350 #Default in case of problems
    #Don't care about spurious muons near end either
    for i in range(767,ano_start,-1):
        w0 = ano2[i]
        if w0 > 2 and i < 400:
            ano_finish = i + 10
            break
    # If the dynode is not saturated (fewer than 2 bins above 1020), the
    # dynode window is more reliable -- reuse it for the anode channel.
    if len(np.where(dyn > 1020)[0]) < 2:
        ano_start = dyn_start
        ano_finish = dyn_finish
    return dyn_b,ano_b,dyn_start,ano_start,dyn_finish,ano_finish
def find_vem(p):
    """Estimate the charge-histogram peaks for PMT ``p``.

    Reads mon_hist_charge.txt (written by ``save_mon``), smooths the
    counts with a 45-point, 3rd-degree Savitzky-Golay filter, and locates
    the pedestal peak (first 60 bins) and the VEM charge peak (beyond
    bin 60).

    Parameters
    ----------
    p : int
        PMT index: 0, 1 or 2 for PMT #1, #2, #3.

    Returns
    -------
    tuple of int
        ``(q_peak, ped_peak)`` -- bin positions of the charge peak and
        the pedestal peak.
    """
    cols = (2 * p, 2 * p + 1)
    bins, counts = np.loadtxt("mon_hist_charge.txt", usecols=cols,
                              dtype=int, unpack=True)
    # Shift the axis so the first bin sits at zero (kept for parity with
    # the plotting code, though only the counts are used below).
    bins = bins - bins[0]
    smooth = spysig.savgol_filter(counts, 45, 3)
    pedestal = smooth[:60].argmax()          # pedestal peak: first 60 bins
    charge = smooth[60:].argmax() + 60       # VEM peak: beyond the pedestal
    return charge, pedestal
def plot_vem():
    """Plot the three PMT charge histograms with their smoothed fits and
    estimated pedestal/charge peak positions; saves hist_charge_fit.png.

    Duplicates the peak-finding logic of ``find_vem`` inline so the
    smoothed curve can be drawn alongside the raw histogram.
    """
    fig = plt.figure(figsize=(16,9))
    for p in range(3):
        xax,yax = np.loadtxt("mon_hist_charge.txt",usecols=(p*2,2*p+1),dtype=int,unpack=True)
        xax = xax - xax[0]
        # Same smoothing and peak search as find_vem().
        Y = spysig.savgol_filter(yax,45,3)
        ped_peak = Y[:60].argmax()
        q_peak = Y[60:].argmax() + 60
        ax = fig.add_subplot(1,3,p+1)
        plt.step(xax,yax)
        plt.plot(xax,Y)
        plt.xlabel('FADC channels')
        plt.title('PMT %i charge histogram' %(p+1))
        plt.ylim(ymin=0)
        ymax = plt.ylim()[1]
        # Mark both peaks with full-height vertical lines.
        plt.vlines(ped_peak,0,ymax)
        plt.vlines(q_peak,0,ymax)
        s='Pedestal peak = %i\nCharge peak=%i' %(ped_peak,q_peak)
        plt.text(0.65,0.75,s,fontsize=10,transform=ax.transAxes)
    plt.tight_layout()
    plt.savefig('hist_charge_fit.png')
    plt.close('all')
def make_plots(evt_num,gps):
    """Produce the per-event diagnostic plots and summary line.

    Reads calib_info.txt and the FADC_trace file from the current
    directory, plots raw dynode/anode ADC traces, baseline-subtracted
    signals in VEM units, and the charge-histogram fits.

    Parameters
    ----------
    evt_num : int
        Event identifier used in the output plot filename.
    gps : int
        GPS second of the event, echoed in the returned summary line.

    Returns
    -------
    str
        Space-separated summary: gps, three anode signals, three dynode
        signals (VEM), and six 0/1 saturation flags (anode then dynode).
    """
    # Get *estimated* offsets from calib data
    a_base = [0,0,0]
    d_base = [0,0,0]
    v_peaks = [0,0,0]
    da = [0,0,0]
    v_charge = [0,0,0]
    # Pull the per-PMT calibration quantities out of calib_info.txt by
    # matching the label text written by save_calib().
    with open('calib_info.txt','r') as F:
        for line in F:
            if "Dynode Base" in line and "Var" not in line:
                ss = line.split(' ')
                for j in range(3):
                    d_base[j] = float(ss[j+2])
            elif "Anode Base" in line and "Var" not in line:
                ss = line.split(' ')
                for j in range(3):
                    a_base[j] = float(ss[j+2])
            elif "VemPeak" in line:
                ss = line.split(' ')
                for j in range(3):
                    v_peaks[j] = float(ss[j+1])
            elif "D/A" in line and "var" not in line and "Trigger" not in line:
                ss = line.split(' ')
                for j in range(3):
                    da[j] = float(ss[j+1])
            elif "VemCharge" in line:
                ss = line.split(' ')
                for j in range(3):
                    v_charge[j] = float(ss[j+1])
            else:
                continue
    # FADC_trace columns: time index, dynode PMT1-3 (cols 1-3),
    # anode PMT1-3 (cols 4-6).
    fadc_hist = np.loadtxt('FADC_trace',dtype=int)
    xaxis = np.arange(0,768)
    plt.figure(figsize=(19,8))
    for i in range(3):
        plt.subplot(1,3,i+1)
        plt.plot(xaxis,fadc_hist[:,i+1],
                 drawstyle='steps-pre')
        plt.title('PMT {}'.format(i+1))
        plt.xlabel(r'Time [25 ns]')
        plt.ylabel('ADC counts')
    plt.tight_layout()
    plt.savefig('dynode_adc.png')
    plt.close()
    plt.figure(figsize=(19,8))
    for i in range(3):
        plt.subplot(1,3,i+1)
        plt.plot(xaxis,fadc_hist[:,i+4],
                 drawstyle='steps-pre')
        plt.title('PMT {}'.format(i+1))
        plt.xlabel(r'Time [25 ns]')
        plt.ylabel('ADC counts')
    plt.tight_layout()
    plt.savefig('anode_adc.png')
    plt.close()
    # Make Signal plot
    sga = [0.]*3
    sgd = [0.]*3
    f,axs = plt.subplots(nrows=2,ncols=3,sharex='col',sharey='row',figsize=(22,14))
    axs[0][0].set_ylabel('ANODE Signal [VEM peak]')
    axs[1][0].set_ylabel('DYNODE Signal [VEM peak]')
    ano_sat = [0]*3
    dyn_sat = [0]*3
    for i in range(3):
        y=np.empty(0) #Anode y-axis
        y2 = np.empty(0) #Dynode y-axis
        #Get ADC traces for anode
        y = fadc_hist[:,i+4]
        # Bins above 1020 counts are treated as saturated.
        max_ano_adc = np.where(y>1020)[0]
        #Get ADC traces for dynode
        y2 = fadc_hist[:,i+1]
        max_dyn_adc = np.where(y2>1020)[0]
        #Determine baseline
        dyn_b,ano_b,d_start,a_start,d_end,a_end = find_baseline(y2,y)
        qvem_peak, pdl_peak = find_vem(i)
        #Calculate signals
        # Anode charge is rescaled by the nominal dynode/anode gain ratio
        # of 32; dynode uses the VEM charge peak directly.
        y_sig = y - ano_b
        sga[i] = y_sig[a_start:a_end].sum() / (qvem_peak / 32)
        y2_sig = y2 - dyn_b
        sgd[i] = y2_sig[d_start:d_end].sum() / qvem_peak
        #Put ADC traces into normalized units
        y_peak_in_vem = np.max(y_sig / (qvem_peak / 32))
        y2_peak_in_vem = np.max(y2_sig / qvem_peak)
        y2_max_val = y2_sig.max()
        # Plot window: dynode signal window padded by 10 bins before and
        # 20 after; the anode shares the same window for alignment.
        xnew = xaxis[d_start-10:d_end+20]
        # NOTE(review): the traces are multiplied by peak-in-VEM and divided
        # by the charge peak again -- looks like a double normalization;
        # TODO confirm the intended y-axis units.
        plot_y = y_sig[d_start-10:d_end+20] * y_peak_in_vem / qvem_peak
        plot_y2 = y2_sig[d_start-10:d_end+20] * y2_peak_in_vem / qvem_peak
        #Plot anode
        axs[0][i].step(xnew,plot_y)
        axs[0][i].vlines(a_start,0,1.2*plot_y.max(),linestyle='dashed',color='green')
        axs[0][i].vlines(a_end,0,1.2*plot_y.max(),linestyle='dashed',color='green')
        axs[1][i].step(xnew,plot_y2)
        axs[1][i].vlines(d_start,0,1.2*plot_y2.max(),linestyle='dashed',color='green')
        axs[1][i].vlines(d_end,0,1.2*plot_y2.max(),linestyle='dashed',color='green')
        axs[1][i].set_xlabel(r'Time [25 ns]')
        boxstr = 'S=%.1f VEM\nD/A=%.1f\nVEM Charge=%.1f\nVEM Peak=%.1f' %(sga[i],da[i],v_charge[i],v_peaks[i])
        boxstr2 = 'S=%.1f VEM\nD/A=%.1f\nVEM Charge=%.1f\nVEM Peak=%.1f' %(sgd[i],da[i],v_charge[i],v_peaks[i])
        axs[0][i].text(0.7,0.75,boxstr,fontsize=10,transform=axs[0][i].transAxes)
        axs[1][i].text(0.7,0.75,boxstr2,fontsize=10,transform=axs[1][i].transAxes)
        axs[0][i].set_title('PMT %i' %(i+1))
        axs[0][i].set_xlim(xmin=xnew.min(),xmax=xnew.max())
        axs[1][i].set_xlim(xmin=xnew.min(),xmax=xnew.max())
        # Flag saturation when more than 2 bins rail above 1020 counts.
        if len(max_ano_adc) > 2:
            axs[0][i].text(0.1,0.75,'SATURATED',color='red',fontsize=10,transform=axs[0][i].transAxes)
            ano_sat[i] = 1
        if len(max_dyn_adc) > 2:
            axs[1][i].text(0.1,0.75,'SATURATED',color='red',fontsize=10,transform=axs[1][i].transAxes)
            dyn_sat[i] = 1
    plt.tight_layout()
    plt.savefig('%i_signal.png' %evt_num)
    plt.close('all')
    plot_vem()
    return "%i %.3f %.3f %.3f %.3f %.3f %.3f %i %i %i %i %i %i" %(gps,sga[0],sga[1],sga[2],sgd[0],sgd[1],sgd[2],ano_sat[0],ano_sat[1],ano_sat[2],dyn_sat[0],dyn_sat[1],dyn_sat[2])
#Read through T3 file collecting events into a large list
#Since writing the original script I've found a more elegant approach
#thanks to inspectorG4dget at stackoverflow
#http://stackoverflow.com/questions/18865058/
"""
date_list = ['20161107',
'20161108',
'20161109',
'20161110',
'20161111',
'20161112',
'20161113',
'20161114',
'20161115',
'20161116',
'20161117',
'20161118',
'20161119',
'20161120',
'20161121',
'20161122',
'20161123',
'20161124',
'20161125',
'20161126',
'20161127',
'20161128',
'20161129',
'20161130',
'20161201',
'20161202',
'20161203',
'20161204',
'20161205',
'20161206',
'20161207',
'20161208',
'20161209',
'20161210',
'20161211',
'20161212',
'20161213',
'20161214',
'20161215',
date_list=['20161216',
'20161217',
'20161218',
'20161219',
'20161220',
'20161221',
'20161222',
'20161223',
'20161224',
'20161225',
'20161226',
'20161227',
'20161228',
'20161229',
'20161230',
'20161231',
'20170101',
'20170102',
'20170103',
'20170104',
'20170105',
'20170106',
'20170107',
'20170108',
'20170109',
'20170110',
'20170111',
'20170112',
'20170114',
'20170117',
'20170118',
'20170119',
'20170120',
'20170121',
'20170122',
'20170123',
'20170127',
'20170128',
'20170129',
'20170130',
'20170131',
'20170201',
'20170202',
'20170203',
'20170204',
'20170205',
'20170206',
'20170207',
'20170208',
'20170209',
'20170210',
'20170211',
'20170212',
'20170213',
'20170214',
'20170215',
'20170216',
'20170217',
'20170218',
'20170219',
'20170220',
'20170221',
'20170222',
'20170223',
'20170224',
'20170225',
'20170226',
'20170227',
'20170228',
'20170301',
'20170302',
'20170303',
'20170304',
'20170305',
'20170306',
'20170307',
'20170308',
'20170309',
'20170310',
'20170311',
'20170312',
'20170313',
'20170314',
'20170315',
'20170316',
'20170317',
'20170318',
'20170319',
'20170320',
'20170321',]
"""
# Dates (YYYYMMDD) of the T3 files to process in this run.
date_list = '20170330 20170331 20170401 20170402 20170403 20170404'.split()
# Process one day's worth of T3 trigger data per date: fetch and unpack the
# day's compressed T3 dump, split it into per-event hex payloads, decompress
# and save each event, produce monitoring plots, then sort the resulting
# event directories (and their signal-summary lines) into the "global" and
# "local" coincidence trees served by the web monitor.
for date in date_list:
    t3list = []  # hex-encoded, bz2-compressed event payloads for this day
    num_evts = 0
    print("Reading T3 event file ...")
    fdate = date
    yr = int(fdate[:4])
    mo = int(fdate[4:6])
    dy = int(fdate[6:])
    os.chdir('/home/augta/web_monitor/tmp')
    fname = "%i_%02d_%02d" % (yr, mo, dy)
    # Work on a local copy so the archived .gz stays untouched.
    sp.call(['cp', '/home/augta/data/south/t3/%s.T3.gz' % fname, '.'])
    sp.call(['gunzip', "%s.T3.gz" % fname])
    filename = "%s.T3" % fname
    # Collect the payload text between "Event ..." / "----------" markers.
    with open(filename, 'r') as t3file:
        copy = False
        for line in t3file:
            if line.strip() == "Event ...":
                copy = True
                data_str = ''
                num_evts += 1
            elif line.strip() == "----------":
                copy = False
                # Avoid clipped data streams: a complete compressed event
                # is expected to exceed this size threshold.
                if len(data_str) > 9000:
                    t3list.append(data_str)
            elif copy:
                data_str += line.strip().replace(' ', '')
    time.sleep(0.5)
    evt_count = 1
    signal_data = []  # one summary string per event (returned by make_plots)
    for t3 in t3list:
        print("Event %i of %i" % (evt_count, len(t3list)))
        evt_id = int(t3[:4], 16)
        error_code = int(t3[4:6], 16)  # value of 1 indicates no error for T3 (parsed, unused)
        packed = binascii.unhexlify(bytes(t3[8:], 'ascii'))
        dec_t3 = bz2.decompress(packed)
        # The PowerPC hardware uses big-endian format.
        gps_YMDHMnS = struct.unpack('>I', dec_t3[:4])[0]  # first 4 bytes: GPS seconds
        gps_TICK = struct.unpack('>I', dec_t3[4:8])[0]    # next 4: GPS clock cycles (parsed, unused)
        try:
            os.mkdir("{0}_{1}".format(evt_id, gps_YMDHMnS))
        except OSError as e:
            # errno 17 == EEXIST: event was already unpacked on a previous
            # run, so skip it.
            if e.errno == 17:
                print("This event has already been unpacked and saved, skipping ...")
                continue
            # NOTE(review): other OSErrors fall through here (original
            # behavior); the chdir below will then fail loudly.
        os.chdir('{0}_{1}'.format(evt_id, gps_YMDHMnS))
        # Persist the raw decompressed event; 'with' guarantees the file is
        # closed before the external unpacker reads it.
        with open('T3_{}.bin'.format(evt_id), 'bw') as f:
            f.write(dec_t3)
        sp.call(["../../x2", "T3_{}.bin".format(evt_id)])
        monstart = save_calib(dec_t3)
        gpsstart = save_mon(dec_t3, monstart)
        save_gps(dec_t3, gpsstart)
        signal_data.append(make_plots(evt_count, gps_YMDHMnS))
        evt_count += 1
        os.chdir('..')
    sp.call(['rm', filename])
    dirlist = sorted(os.listdir('.'))
    # Stage the day's coincidence files for the web monitor.
    sp.call(['mkdir', '/var/www/html/monitor/data/global_south/%s' % fname])
    sp.call(['mkdir', '/var/www/html/monitor/data/local_south/%s' % fname])
    sp.call(['cp', "/home/augta/data/coincidence/%s.CTAG.gz" % fname,
             '/var/www/html/monitor/data/global_south/%s' % fname])
    sp.call(['cp', "/home/augta/data/coincidence/%s.CTAL.gz" % fname,
             '/var/www/html/monitor/data/local_south/%s' % fname])
    sp.call(['cp', "/home/augta/data/coincidence/%s.CTAG.gz" % fname, '.'])
    # Column 6 of the CTAG file holds the GPS timestamp of each global event.
    global_gps = np.loadtxt('%s.CTAG.gz' % fname, usecols=(6,), dtype='S500',
                            comments=None)
    sp.call(['rm', "%s.CTAG.gz" % fname])
    num_glob = global_gps.size
    # Event directories are named "<evtid>_<gps_sec>".
    dirlist_gpsonly = [j.split('_')[1] for j in dirlist]
    # First token of every signal summary line is the GPS second.
    signal_data_gpsonly = [j.split(' ')[0] for j in signal_data]
    # Locate and move all global events; also append their summary lines to
    # the global signal text file.
    if num_glob > 0:
        if num_glob == 1:
            # loadtxt returns a 0-d array for a single row; wrap it so the
            # loop below can iterate.
            new_glob = np.zeros(1, dtype='S500')
            new_glob[0] = global_gps
            global_gps = new_glob
        for g in global_gps:
            gps_sec = g.decode('ascii').split('.')[0]
            try:
                # Event might not have an associated T3 (rare).
                fold_ind = dirlist_gpsonly.index(gps_sec)
            except ValueError:  # was a bare except; .index raises ValueError
                continue
            d = dirlist[fold_ind]
            sp.call(['mv', d + '/', '/var/www/html/monitor/data/global_south/%s/' % fname])
            data_ind = signal_data_gpsonly.index(gps_sec)
            s = signal_data[data_ind]
            with open('/home/augta/web_monitor/south_global_signal.txt', 'a') as f:
                f.write(s + '\n')
    # Re-list: what remains should, in principle, be local events only.
    dirlist = sorted(os.listdir('.'))
    dirlist_gpsonly = [j.split('_')[1] for j in dirlist]
    for d, gps_sec in zip(dirlist, dirlist_gpsonly):
        sp.call(['mv', d + '/', '/var/www/html/monitor/data/local_south/%s/' % fname])
        data_ind = signal_data_gpsonly.index(gps_sec)
        s = signal_data[data_ind]
        with open('/home/augta/web_monitor/south_local_signal.txt', 'a') as f:
            f.write(s + '\n')
| apache-2.0 |
PanDAWMS/panda-bigmon-core-old | core/common/models.py | 1 | 139296 | # Create your models here.
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from ..pandajob.columns_config import COLUMNS, ORDER_COLUMNS, COL_TITLES, FILTERS
from django.db import models

# Extend the set of Meta options Django accepts so the model Meta classes in
# this module can declare the custom attributes used by the app's table/filter
# rendering code (allColumns, orderColumns, etc.).
models.options.DEFAULT_NAMES += ('allColumns', 'orderColumns',
                                 'primaryColumns', 'secondaryColumns',
                                 'columnTitles', 'filterFields',)
class Cache(models.Model):
    """ORM mapping for the 'cache' table (cached query payloads keyed by type/value/qurl)."""
    type = models.CharField(db_column='TYPE', max_length=250)
    value = models.CharField(db_column='VALUE', max_length=250)
    qurl = models.CharField(db_column='QURL', max_length=250)
    modtime = models.DateTimeField(db_column='MODTIME')
    usetime = models.DateTimeField(db_column='USETIME')
    updmin = models.IntegerField(null=True, db_column='UPDMIN', blank=True)
    data = models.TextField(db_column='DATA', blank=True)

    class Meta:
        db_table = u'cache'
class Certificates(models.Model):
    """ORM mapping for the 'certificates' table (certificate text by numeric id)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    cert = models.CharField(max_length=12000, db_column='CERT')

    class Meta:
        db_table = u'certificates'
class Classlist(models.Model):
    """ORM mapping for the 'classlist' table.

    The DB's composite key (CLASS, NAME) is emulated with two primary_key
    flags plus unique_together; Django itself honors only one real PK.
    """
    class_field = models.CharField(max_length=90, db_column='CLASS', primary_key=True) # Field renamed because it was a Python reserved word.
    name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
    rights = models.CharField(max_length=90, db_column='RIGHTS')
    priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
    quota1 = models.BigIntegerField(null=True, db_column='QUOTA1', blank=True)
    quota7 = models.BigIntegerField(null=True, db_column='QUOTA7', blank=True)
    quota30 = models.BigIntegerField(null=True, db_column='QUOTA30', blank=True)

    class Meta:
        db_table = u'classlist'
        unique_together = ('class_field', 'name')
class Cloudconfig(models.Model):
    """ORM mapping for the 'cloudconfig' table (per-cloud configuration, keyed by NAME)."""
    name = models.CharField(max_length=60, primary_key=True, db_column='NAME')
    description = models.CharField(max_length=150, db_column='DESCRIPTION')
    tier1 = models.CharField(max_length=60, db_column='TIER1')
    tier1se = models.CharField(max_length=1200, db_column='TIER1SE')
    relocation = models.CharField(max_length=30, db_column='RELOCATION', blank=True)
    weight = models.IntegerField(db_column='WEIGHT')
    server = models.CharField(max_length=300, db_column='SERVER')
    status = models.CharField(max_length=60, db_column='STATUS')
    transtimelo = models.IntegerField(db_column='TRANSTIMELO')
    transtimehi = models.IntegerField(db_column='TRANSTIMEHI')
    waittime = models.IntegerField(db_column='WAITTIME')
    comment_field = models.CharField(max_length=600, db_column='COMMENT_', blank=True) # Field renamed because it was a Python reserved word.
    space = models.IntegerField(db_column='SPACE')
    moduser = models.CharField(max_length=90, db_column='MODUSER', blank=True)
    modtime = models.DateTimeField(db_column='MODTIME')
    validation = models.CharField(max_length=60, db_column='VALIDATION', blank=True)
    mcshare = models.IntegerField(db_column='MCSHARE')
    countries = models.CharField(max_length=240, db_column='COUNTRIES', blank=True)
    fasttrack = models.CharField(max_length=60, db_column='FASTTRACK', blank=True)
    nprestage = models.BigIntegerField(db_column='NPRESTAGE')
    pilotowners = models.CharField(max_length=900, db_column='PILOTOWNERS', blank=True)
    dn = models.CharField(max_length=300, db_column='DN', blank=True)
    email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
    fairshare = models.CharField(max_length=384, db_column='FAIRSHARE', blank=True)

    class Meta:
        db_table = u'cloudconfig'
class Cloudspace(models.Model):
    """ORM mapping for the 'cloudspace' table; composite key (CLOUD, STORE) emulated via unique_together."""
    cloud = models.CharField(max_length=60, db_column='CLOUD', primary_key=True)
    store = models.CharField(max_length=150, db_column='STORE', primary_key=True)
    space = models.IntegerField(db_column='SPACE')
    freespace = models.IntegerField(db_column='FREESPACE')
    moduser = models.CharField(max_length=90, db_column='MODUSER')
    modtime = models.DateTimeField(db_column='MODTIME')

    class Meta:
        db_table = u'cloudspace'
        unique_together = ('cloud', 'store')
class Cloudtasks(models.Model):
    """ORM mapping for the 'cloudtasks' table (task-to-cloud assignments)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    taskname = models.CharField(max_length=384, db_column='TASKNAME', blank=True)
    taskid = models.IntegerField(null=True, db_column='TASKID', blank=True)
    cloud = models.CharField(max_length=60, db_column='CLOUD', blank=True)
    status = models.CharField(max_length=60, db_column='STATUS', blank=True)
    tmod = models.DateTimeField(db_column='TMOD')
    tenter = models.DateTimeField(db_column='TENTER')

    class Meta:
        db_table = u'cloudtasks'
class Datasets(models.Model):
    """ORM mapping for the 'datasets' table.

    Composite key (VUID, MODIFICATIONDATE) emulated with two primary_key
    flags plus unique_together.
    """
    vuid = models.CharField(max_length=120, db_column='VUID', primary_key=True)
    name = models.CharField(max_length=765, db_column='NAME')
    version = models.CharField(max_length=30, db_column='VERSION', blank=True)
    type = models.CharField(max_length=60, db_column='TYPE')
    status = models.CharField(max_length=30, db_column='STATUS', blank=True)
    numberfiles = models.IntegerField(null=True, db_column='NUMBERFILES', blank=True)
    currentfiles = models.IntegerField(null=True, db_column='CURRENTFILES', blank=True)
    creationdate = models.DateTimeField(null=True, db_column='CREATIONDATE', blank=True)
    modificationdate = models.DateTimeField(db_column='MODIFICATIONDATE', primary_key=True)
    moverid = models.BigIntegerField(db_column='MOVERID')
    transferstatus = models.IntegerField(db_column='TRANSFERSTATUS')
    subtype = models.CharField(max_length=15, db_column='SUBTYPE', blank=True)

    class Meta:
        db_table = u'datasets'
        unique_together = ('vuid', 'modificationdate')
class DeftDataset(models.Model):
    """ORM mapping for the 'deft_dataset' table (managed=False: schema owned externally)."""
    dataset_id = models.CharField(db_column='DATASET_ID', primary_key=True, max_length=255)
    dataset_meta = models.BigIntegerField(db_column='DATASET_META', blank=True, null=True)
    dataset_state = models.CharField(db_column='DATASET_STATE', max_length=16, blank=True)
    dataset_source = models.BigIntegerField(db_column='DATASET_SOURCE', blank=True, null=True)
    dataset_target = models.BigIntegerField(db_column='DATASET_TARGET', blank=True, null=True)
    dataset_comment = models.CharField(db_column='DATASET_COMMENT', max_length=128, blank=True)

    class Meta:
        managed = False
        db_table = 'deft_dataset'
class DeftMeta(models.Model):
    """ORM mapping for the 'deft_meta' table (DEFT request metadata)."""
    meta_id = models.BigIntegerField(primary_key=True, db_column='META_ID')
    meta_state = models.CharField(max_length=48, db_column='META_STATE', blank=True)
    meta_comment = models.CharField(max_length=384, db_column='META_COMMENT', blank=True)
    meta_req_ts = models.DateTimeField(null=True, db_column='META_REQ_TS', blank=True)
    meta_upd_ts = models.DateTimeField(null=True, db_column='META_UPD_TS', blank=True)
    meta_requestor = models.CharField(max_length=48, db_column='META_REQUESTOR', blank=True)
    meta_manager = models.CharField(max_length=48, db_column='META_MANAGER', blank=True)
    meta_vo = models.CharField(max_length=48, db_column='META_VO', blank=True)

    class Meta:
        db_table = u'deft_meta'
class DeftTask(models.Model):
    """ORM mapping for the 'deft_task' table (DEFT task records)."""
    task_id = models.BigIntegerField(primary_key=True, db_column='TASK_ID')
    task_meta = models.BigIntegerField(null=True, db_column='TASK_META', blank=True)
    task_state = models.CharField(max_length=48, db_column='TASK_STATE', blank=True)
    task_param = models.TextField(db_column='TASK_PARAM', blank=True)
    task_tag = models.CharField(max_length=48, db_column='TASK_TAG', blank=True)
    task_comment = models.CharField(max_length=384, db_column='TASK_COMMENT', blank=True)
    task_vo = models.CharField(max_length=48, db_column='TASK_VO', blank=True)
    task_transpath = models.CharField(max_length=384, db_column='TASK_TRANSPATH', blank=True)

    class Meta:
        db_table = u'deft_task'
class Dslist(models.Model):
    """ORM mapping for the 'dslist' table (dataset list entries with pin/lifetime state)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    duid = models.CharField(max_length=120, db_column='DUID', blank=True)
    name = models.CharField(max_length=600, db_column='NAME')
    ugid = models.IntegerField(null=True, db_column='UGID', blank=True)
    priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
    status = models.CharField(max_length=30, db_column='STATUS', blank=True)
    lastuse = models.DateTimeField(db_column='LASTUSE')
    pinstate = models.CharField(max_length=30, db_column='PINSTATE', blank=True)
    pintime = models.DateTimeField(db_column='PINTIME')
    lifetime = models.DateTimeField(db_column='LIFETIME')
    site = models.CharField(max_length=180, db_column='SITE', blank=True)
    # PAR1..PAR6 are opaque free-form parameter slots in the DB schema.
    par1 = models.CharField(max_length=90, db_column='PAR1', blank=True)
    par2 = models.CharField(max_length=90, db_column='PAR2', blank=True)
    par3 = models.CharField(max_length=90, db_column='PAR3', blank=True)
    par4 = models.CharField(max_length=90, db_column='PAR4', blank=True)
    par5 = models.CharField(max_length=90, db_column='PAR5', blank=True)
    par6 = models.CharField(max_length=90, db_column='PAR6', blank=True)

    class Meta:
        db_table = u'dslist'
class Etask(models.Model):
    """ORM mapping for the 'etask' table (task records keyed by TASKID)."""
    taskid = models.IntegerField(primary_key=True, db_column='TASKID')
    creationtime = models.DateTimeField(db_column='CREATIONTIME')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    taskname = models.CharField(max_length=768, db_column='TASKNAME', blank=True)
    status = models.CharField(max_length=384, db_column='STATUS', blank=True)
    username = models.CharField(max_length=768, db_column='USERNAME', blank=True)
    usergroup = models.CharField(max_length=96, db_column='USERGROUP', blank=True)
    userrole = models.CharField(max_length=96, db_column='USERROLE', blank=True)
    actualpars = models.CharField(max_length=6000, db_column='ACTUALPARS', blank=True)
    cpucount = models.IntegerField(db_column='CPUCOUNT', blank=True)
    cpuunit = models.CharField(max_length=96, db_column='CPUUNIT', blank=True)
    diskcount = models.IntegerField(db_column='DISKCOUNT', blank=True)
    diskunit = models.CharField(max_length=96, db_column='DISKUNIT', blank=True)
    ramcount = models.IntegerField(db_column='RAMCOUNT', blank=True)
    ramunit = models.CharField(max_length=96, db_column='RAMUNIT', blank=True)
    outip = models.CharField(max_length=9, db_column='OUTIP', blank=True)
    tasktype = models.CharField(max_length=96, db_column='TASKTYPE', blank=True)
    grid = models.CharField(max_length=96, db_column='GRID', blank=True)
    transfk = models.IntegerField(db_column='TRANSFK', blank=True)
    transuses = models.CharField(max_length=768, db_column='TRANSUSES', blank=True)
    transhome = models.CharField(max_length=768, db_column='TRANSHOME', blank=True)
    transpath = models.CharField(max_length=768, db_column='TRANSPATH', blank=True)
    transformalpars = models.CharField(max_length=768, db_column='TRANSFORMALPARS', blank=True)
    tier = models.CharField(max_length=36, db_column='TIER', blank=True)
    ndone = models.IntegerField(db_column='NDONE', blank=True)
    ntotal = models.IntegerField(db_column='NTOTAL', blank=True)
    nevents = models.BigIntegerField(db_column='NEVENTS', blank=True)
    relpriority = models.CharField(max_length=30, db_column='RELPRIORITY', blank=True)
    expevtperjob = models.BigIntegerField(db_column='EXPEVTPERJOB', blank=True)
    tasktransinfo = models.CharField(max_length=1536, db_column='TASKTRANSINFO', blank=True)
    extid1 = models.BigIntegerField(db_column='EXTID1', blank=True)
    reqid = models.BigIntegerField(db_column='REQID', blank=True)
    expntotal = models.BigIntegerField(db_column='EXPNTOTAL', blank=True)
    cmtconfig = models.CharField(max_length=768, db_column='CMTCONFIG', blank=True)
    site = models.CharField(max_length=384, db_column='SITE', blank=True)
    tasktype2 = models.CharField(max_length=192, db_column='TASKTYPE2', blank=True)
    taskpriority = models.IntegerField(db_column='TASKPRIORITY', blank=True)
    partid = models.CharField(max_length=192, db_column='PARTID', blank=True)
    taskpars = models.CharField(max_length=3072, db_column='TASKPARS', blank=True)
    fillstatus = models.CharField(max_length=192, db_column='FILLSTATUS', blank=True)
    rw = models.BigIntegerField(db_column='RW', blank=True)
    jobsremaining = models.BigIntegerField(db_column='JOBSREMAINING', blank=True)
    cpuperjob = models.IntegerField(db_column='CPUPERJOB', blank=True)

    class Meta:
        db_table = u'etask'
class Filestable4(models.Model):
    """ORM mapping for the 'filestable4' table (per-job file records).

    Composite key (ROW_ID, MODIFICATIONTIME) emulated with two primary_key
    flags plus unique_together.
    """
    row_id = models.BigIntegerField(db_column='ROW_ID', primary_key=True)
    pandaid = models.BigIntegerField(db_column='PANDAID')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
    guid = models.CharField(max_length=192, db_column='GUID', blank=True)
    lfn = models.CharField(max_length=768, db_column='LFN', blank=True)
    type = models.CharField(max_length=60, db_column='TYPE', blank=True)
    dataset = models.CharField(max_length=765, db_column='DATASET', blank=True)
    status = models.CharField(max_length=192, db_column='STATUS', blank=True)
    proddblock = models.CharField(max_length=765, db_column='PRODDBLOCK', blank=True)
    proddblocktoken = models.CharField(max_length=750, db_column='PRODDBLOCKTOKEN', blank=True)
    dispatchdblock = models.CharField(max_length=765, db_column='DISPATCHDBLOCK', blank=True)
    dispatchdblocktoken = models.CharField(max_length=750, db_column='DISPATCHDBLOCKTOKEN', blank=True)
    destinationdblock = models.CharField(max_length=765, db_column='DESTINATIONDBLOCK', blank=True)
    destinationdblocktoken = models.CharField(max_length=750, db_column='DESTINATIONDBLOCKTOKEN', blank=True)
    destinationse = models.CharField(max_length=750, db_column='DESTINATIONSE', blank=True)
    fsize = models.BigIntegerField(db_column='FSIZE')
    md5sum = models.CharField(max_length=108, db_column='MD5SUM', blank=True)
    checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
    scope = models.CharField(max_length=90, db_column='SCOPE', blank=True)
    jeditaskid = models.BigIntegerField(null=True, db_column='JEDITASKID', blank=True)
    datasetid = models.BigIntegerField(null=True, db_column='DATASETID', blank=True)
    fileid = models.BigIntegerField(null=True, db_column='FILEID', blank=True)
    attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)

    class Meta:
        db_table = u'filestable4'
        unique_together = ('row_id', 'modificationtime')
class FilestableArch(models.Model):
    """ORM mapping for the 'filestable_arch' table (archived file records; mirrors Filestable4 with smaller column widths plus CREATIONTIME)."""
    row_id = models.BigIntegerField(db_column='ROW_ID', primary_key=True)
    pandaid = models.BigIntegerField(db_column='PANDAID')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
    creationtime = models.DateTimeField(db_column='CREATIONTIME')
    guid = models.CharField(max_length=64, db_column='GUID', blank=True)
    lfn = models.CharField(max_length=256, db_column='LFN', blank=True)
    type = models.CharField(max_length=20, db_column='TYPE', blank=True)
    dataset = models.CharField(max_length=255, db_column='DATASET', blank=True)
    status = models.CharField(max_length=64, db_column='STATUS', blank=True)
    proddblock = models.CharField(max_length=255, db_column='PRODDBLOCK', blank=True)
    proddblocktoken = models.CharField(max_length=250, db_column='PRODDBLOCKTOKEN', blank=True)
    dispatchdblock = models.CharField(max_length=265, db_column='DISPATCHDBLOCK', blank=True)
    dispatchdblocktoken = models.CharField(max_length=250, db_column='DISPATCHDBLOCKTOKEN', blank=True)
    destinationdblock = models.CharField(max_length=265, db_column='DESTINATIONDBLOCK', blank=True)
    destinationdblocktoken = models.CharField(max_length=250, db_column='DESTINATIONDBLOCKTOKEN', blank=True)
    destinationse = models.CharField(max_length=250, db_column='DESTINATIONSE', blank=True)
    fsize = models.BigIntegerField(db_column='FSIZE')
    md5sum = models.CharField(max_length=40, db_column='MD5SUM', blank=True)
    checksum = models.CharField(max_length=40, db_column='CHECKSUM', blank=True)
    scope = models.CharField(max_length=30, db_column='SCOPE', blank=True)
    jeditaskid = models.BigIntegerField(null=True, db_column='JEDITASKID', blank=True)
    datasetid = models.BigIntegerField(null=True, db_column='DATASETID', blank=True)
    fileid = models.BigIntegerField(null=True, db_column='FILEID', blank=True)
    attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)

    class Meta:
        db_table = u'filestable_arch'
        unique_together = ('row_id', 'modificationtime')
class Groups(models.Model):
    """ORM mapping for the 'groups' table.

    The numeric suffixes 1/7/30 on the usage/quota columns correspond to the
    distinct *1, *7 and *30 DB columns (presumably rolling windows in days —
    TODO confirm against the producer of this table).
    """
    id = models.IntegerField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=180, db_column='NAME')
    description = models.CharField(max_length=360, db_column='DESCRIPTION')
    url = models.CharField(max_length=300, db_column='URL', blank=True)
    classa = models.CharField(max_length=90, db_column='CLASSA', blank=True)
    classp = models.CharField(max_length=90, db_column='CLASSP', blank=True)
    classxp = models.CharField(max_length=90, db_column='CLASSXP', blank=True)
    njobs1 = models.IntegerField(null=True, db_column='NJOBS1', blank=True)
    njobs7 = models.IntegerField(null=True, db_column='NJOBS7', blank=True)
    njobs30 = models.IntegerField(null=True, db_column='NJOBS30', blank=True)
    cpua1 = models.BigIntegerField(null=True, db_column='CPUA1', blank=True)
    cpua7 = models.BigIntegerField(null=True, db_column='CPUA7', blank=True)
    cpua30 = models.BigIntegerField(null=True, db_column='CPUA30', blank=True)
    cpup1 = models.BigIntegerField(null=True, db_column='CPUP1', blank=True)
    cpup7 = models.BigIntegerField(null=True, db_column='CPUP7', blank=True)
    cpup30 = models.BigIntegerField(null=True, db_column='CPUP30', blank=True)
    cpuxp1 = models.BigIntegerField(null=True, db_column='CPUXP1', blank=True)
    cpuxp7 = models.BigIntegerField(null=True, db_column='CPUXP7', blank=True)
    cpuxp30 = models.BigIntegerField(null=True, db_column='CPUXP30', blank=True)
    allcpua1 = models.BigIntegerField(null=True, db_column='ALLCPUA1', blank=True)
    allcpua7 = models.BigIntegerField(null=True, db_column='ALLCPUA7', blank=True)
    allcpua30 = models.BigIntegerField(null=True, db_column='ALLCPUA30', blank=True)
    allcpup1 = models.BigIntegerField(null=True, db_column='ALLCPUP1', blank=True)
    allcpup7 = models.BigIntegerField(null=True, db_column='ALLCPUP7', blank=True)
    allcpup30 = models.BigIntegerField(null=True, db_column='ALLCPUP30', blank=True)
    allcpuxp1 = models.BigIntegerField(null=True, db_column='ALLCPUXP1', blank=True)
    allcpuxp7 = models.BigIntegerField(null=True, db_column='ALLCPUXP7', blank=True)
    allcpuxp30 = models.BigIntegerField(null=True, db_column='ALLCPUXP30', blank=True)
    quotaa1 = models.BigIntegerField(null=True, db_column='QUOTAA1', blank=True)
    quotaa7 = models.BigIntegerField(null=True, db_column='QUOTAA7', blank=True)
    quotaa30 = models.BigIntegerField(null=True, db_column='QUOTAA30', blank=True)
    quotap1 = models.BigIntegerField(null=True, db_column='QUOTAP1', blank=True)
    quotap7 = models.BigIntegerField(null=True, db_column='QUOTAP7', blank=True)
    quotap30 = models.BigIntegerField(null=True, db_column='QUOTAP30', blank=True)
    quotaxp1 = models.BigIntegerField(null=True, db_column='QUOTAXP1', blank=True)
    quotaxp7 = models.BigIntegerField(null=True, db_column='QUOTAXP7', blank=True)
    quotaxp30 = models.BigIntegerField(null=True, db_column='QUOTAXP30', blank=True)
    allquotaa1 = models.BigIntegerField(null=True, db_column='ALLQUOTAA1', blank=True)
    allquotaa7 = models.BigIntegerField(null=True, db_column='ALLQUOTAA7', blank=True)
    allquotaa30 = models.BigIntegerField(null=True, db_column='ALLQUOTAA30', blank=True)
    allquotap1 = models.BigIntegerField(null=True, db_column='ALLQUOTAP1', blank=True)
    allquotap7 = models.BigIntegerField(null=True, db_column='ALLQUOTAP7', blank=True)
    allquotap30 = models.BigIntegerField(null=True, db_column='ALLQUOTAP30', blank=True)
    allquotaxp1 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP1', blank=True)
    allquotaxp7 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP7', blank=True)
    allquotaxp30 = models.BigIntegerField(null=True, db_column='ALLQUOTAXP30', blank=True)
    space1 = models.IntegerField(null=True, db_column='SPACE1', blank=True)
    space7 = models.IntegerField(null=True, db_column='SPACE7', blank=True)
    space30 = models.IntegerField(null=True, db_column='SPACE30', blank=True)

    class Meta:
        db_table = u'groups'
class History(models.Model):
    """ORM mapping for the 'history' table (time-windowed cpu/space records)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    entrytime = models.DateTimeField(db_column='ENTRYTIME')
    starttime = models.DateTimeField(db_column='STARTTIME')
    endtime = models.DateTimeField(db_column='ENDTIME')
    cpu = models.BigIntegerField(null=True, db_column='CPU', blank=True)
    cpuxp = models.BigIntegerField(null=True, db_column='CPUXP', blank=True)
    space = models.IntegerField(null=True, db_column='SPACE', blank=True)

    class Meta:
        db_table = u'history'
class Incidents(models.Model):
    """ORM mapping for the 'incidents' table (typed incident log keyed by timestamp)."""
    at_time = models.DateTimeField(primary_key=True, db_column='AT_TIME')
    typekey = models.CharField(max_length=60, db_column='TYPEKEY', blank=True)
    description = models.CharField(max_length=600, db_column='DESCRIPTION', blank=True)

    class Meta:
        db_table = u'incidents'
class InfomodelsSitestatus(models.Model):
    """ORM mapping for the 'infomodels_sitestatus' table (per-site active flag)."""
    id = models.BigIntegerField(primary_key=True, db_column='ID')
    sitename = models.CharField(max_length=180, db_column='SITENAME', blank=True)
    active = models.IntegerField(null=True, db_column='ACTIVE', blank=True)

    class Meta:
        db_table = u'infomodels_sitestatus'
class Installedsw(models.Model):
    """ORM mapping for the 'installedsw' table.

    Composite key (SITEID, RELEASE, CACHE, CMTCONFIG) emulated with multiple
    primary_key flags plus unique_together.
    """
    siteid = models.CharField(max_length=180, db_column='SITEID', primary_key=True)
    cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
    release = models.CharField(max_length=30, db_column='RELEASE', primary_key=True)
    cache = models.CharField(max_length=120, db_column='CACHE', primary_key=True)
    validation = models.CharField(max_length=30, db_column='VALIDATION', blank=True)
    cmtconfig = models.CharField(max_length=120, db_column='CMTCONFIG', primary_key=True)

    class Meta:
        db_table = u'installedsw'
        unique_together = ('siteid', 'release', 'cache', 'cmtconfig')
class Jdllist(models.Model):
    """ORM mapping for the 'jdllist' table (named JDL documents)."""
    name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
    host = models.CharField(max_length=180, db_column='HOST', blank=True)
    system = models.CharField(max_length=60, db_column='SYSTEM')
    jdl = models.CharField(max_length=12000, db_column='JDL', blank=True)

    class Meta:
        db_table = u'jdllist'
class JediAuxStatusMintaskid(models.Model):
    """ORM mapping for 'jedi_aux_status_mintaskid' (minimum JEDI task id per status)."""
    status = models.CharField(max_length=192, primary_key=True, db_column='STATUS')
    min_jeditaskid = models.BigIntegerField(db_column='MIN_JEDITASKID')

    class Meta:
        db_table = u'jedi_aux_status_mintaskid'
class JediDatasetContents(models.Model):
    """ORM mapping for 'jedi_dataset_contents' (per-file records of a JEDI dataset).

    Composite key (JEDITASKID, DATASETID, FILEID) emulated with multiple
    primary_key flags plus unique_together.
    """
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
    fileid = models.BigIntegerField(db_column='FILEID', primary_key=True)
    creationdate = models.DateTimeField(db_column='CREATIONDATE')
    lastattempttime = models.DateTimeField(null=True, db_column='LASTATTEMPTTIME', blank=True)
    lfn = models.CharField(max_length=768, db_column='LFN')
    guid = models.CharField(max_length=192, db_column='GUID', blank=True)
    type = models.CharField(max_length=60, db_column='TYPE')
    status = models.CharField(max_length=192, db_column='STATUS')
    fsize = models.BigIntegerField(null=True, db_column='FSIZE', blank=True)
    checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
    scope = models.CharField(max_length=90, db_column='SCOPE', blank=True)
    attemptnr = models.IntegerField(null=True, db_column='ATTEMPTNR', blank=True)
    maxattempt = models.IntegerField(null=True, db_column='MAXATTEMPT', blank=True)
    nevents = models.IntegerField(null=True, db_column='NEVENTS', blank=True)
    keeptrack = models.IntegerField(null=True, db_column='KEEPTRACK', blank=True)
    startevent = models.IntegerField(null=True, db_column='STARTEVENT', blank=True)
    endevent = models.IntegerField(null=True, db_column='ENDEVENT', blank=True)
    firstevent = models.IntegerField(null=True, db_column='FIRSTEVENT', blank=True)
    boundaryid = models.BigIntegerField(null=True, db_column='BOUNDARYID', blank=True)
    pandaid = models.BigIntegerField(db_column='PANDAID', blank=True)

    class Meta:
        db_table = u'jedi_dataset_contents'
        unique_together = ('jeditaskid', 'datasetid', 'fileid')
class JediDatasets(models.Model):
    """ORM mapping for 'jedi_datasets' (JEDI dataset state and bookkeeping).

    Composite key (JEDITASKID, DATASETID) emulated with two primary_key
    flags plus unique_together.
    """
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
    datasetname = models.CharField(max_length=765, db_column='DATASETNAME')
    type = models.CharField(max_length=60, db_column='TYPE')
    creationtime = models.DateTimeField(db_column='CREATIONTIME')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    vo = models.CharField(max_length=48, db_column='VO', blank=True)
    cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
    site = models.CharField(max_length=180, db_column='SITE', blank=True)
    masterid = models.BigIntegerField(null=True, db_column='MASTERID', blank=True)
    provenanceid = models.BigIntegerField(null=True, db_column='PROVENANCEID', blank=True)
    containername = models.CharField(max_length=396, db_column='CONTAINERNAME', blank=True)
    status = models.CharField(max_length=60, db_column='STATUS', blank=True)
    state = models.CharField(max_length=60, db_column='STATE', blank=True)
    statechecktime = models.DateTimeField(null=True, db_column='STATECHECKTIME', blank=True)
    statecheckexpiration = models.DateTimeField(null=True, db_column='STATECHECKEXPIRATION', blank=True)
    frozentime = models.DateTimeField(null=True, db_column='FROZENTIME', blank=True)
    nfiles = models.IntegerField(null=True, db_column='NFILES', blank=True)
    nfilestobeused = models.IntegerField(null=True, db_column='NFILESTOBEUSED', blank=True)
    nfilesused = models.IntegerField(null=True, db_column='NFILESUSED', blank=True)
    nevents = models.BigIntegerField(null=True, db_column='NEVENTS', blank=True)
    neventstobeused = models.BigIntegerField(null=True, db_column='NEVENTSTOBEUSED', blank=True)
    neventsused = models.BigIntegerField(null=True, db_column='NEVENTSUSED', blank=True)
    lockedby = models.CharField(max_length=120, db_column='LOCKEDBY', blank=True)
    lockedtime = models.DateTimeField(null=True, db_column='LOCKEDTIME', blank=True)
    nfilesfinished = models.IntegerField(null=True, db_column='NFILESFINISHED', blank=True)
    nfilesfailed = models.IntegerField(null=True, db_column='NFILESFAILED', blank=True)
    attributes = models.CharField(max_length=300, db_column='ATTRIBUTES', blank=True)
    streamname = models.CharField(max_length=60, db_column='STREAMNAME', blank=True)
    storagetoken = models.CharField(max_length=180, db_column='STORAGETOKEN', blank=True)
    destination = models.CharField(max_length=180, db_column='DESTINATION', blank=True)
    nfilesonhold = models.IntegerField(null=True, db_column='NFILESONHOLD', blank=True)
    templateid = models.BigIntegerField(db_column='TEMPLATEID', blank=True)

    class Meta:
        db_table = u'jedi_datasets'
        unique_together = ('jeditaskid', 'datasetid')
class JediEvents(models.Model):
    """Per-event processing state for JEDI event-service jobs (table JEDI_EVENTS)."""
    # NOTE(review): several fields carry primary_key=True, but stock Django supports
    # only one primary key per model. This appears to be an inspectdb-style emulation
    # of the composite key repeated in Meta.unique_together -- confirm how the active
    # Django version resolves pk lookups before relying on .pk / get(pk=...).
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
    fileid = models.BigIntegerField(db_column='FILEID', primary_key=True)
    job_processid = models.IntegerField(db_column='JOB_PROCESSID', primary_key=True)
    def_min_eventid = models.IntegerField(null=True, db_column='DEF_MIN_EVENTID', blank=True)
    def_max_eventid = models.IntegerField(null=True, db_column='DEF_MAX_EVENTID', blank=True)
    processed_upto_eventid = models.IntegerField(null=True, db_column='PROCESSED_UPTO_EVENTID', blank=True)
    datasetid = models.BigIntegerField(db_column='DATASETID', blank=True)
    status = models.IntegerField(db_column='STATUS', blank=True)
    attemptnr = models.IntegerField(db_column='ATTEMPTNR', blank=True)
    class Meta:
        db_table = u'jedi_events'
        unique_together = ('jeditaskid', 'pandaid', 'fileid', 'job_processid')
class JediJobparamsTemplate(models.Model):
    """Job-parameter template text stored per JEDI task (table JEDI_JOBPARAMS_TEMPLATE)."""
    jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
    jobparamstemplate = models.TextField(db_column='JOBPARAMSTEMPLATE', blank=True)
    class Meta:
        db_table = u'jedi_jobparams_template'
class JediJobRetryHistory(models.Model):
    """Mapping of old PanDA job IDs to their retried successors within a JEDI task."""
    # NOTE(review): multiple primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    oldpandaid = models.BigIntegerField(db_column='OLDPANDAID', primary_key=True)
    newpandaid = models.BigIntegerField(db_column='NEWPANDAID', primary_key=True)
    ins_utc_tstamp = models.BigIntegerField(db_column='INS_UTC_TSTAMP', blank=True)  # epoch seconds, presumably UTC -- confirm
    relationtype = models.CharField(max_length=48, db_column='RELATIONTYPE')
    class Meta:
        db_table = u'jedi_job_retry_history'
        unique_together = ('jeditaskid', 'oldpandaid', 'newpandaid')
class JediOutputTemplate(models.Model):
    """Output-file naming template per task/dataset (table JEDI_OUTPUT_TEMPLATE)."""
    # NOTE(review): multiple primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    datasetid = models.BigIntegerField(db_column='DATASETID', primary_key=True)
    outtempid = models.BigIntegerField(db_column='OUTTEMPID', primary_key=True)
    filenametemplate = models.CharField(max_length=768, db_column='FILENAMETEMPLATE')
    maxserialnr = models.IntegerField(null=True, db_column='MAXSERIALNR', blank=True)
    serialnr = models.IntegerField(null=True, db_column='SERIALNR', blank=True)
    sourcename = models.CharField(max_length=768, db_column='SOURCENAME', blank=True)
    streamname = models.CharField(max_length=60, db_column='STREAMNAME', blank=True)
    outtype = models.CharField(max_length=60, db_column='OUTTYPE', blank=True)
    class Meta:
        db_table = u'jedi_output_template'
        unique_together = ('jeditaskid', 'datasetid', 'outtempid')
class JediTaskparams(models.Model):
    """Raw task-parameter text stored per JEDI task (table JEDI_TASKPARAMS)."""
    jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
    taskparams = models.TextField(db_column='TASKPARAMS', blank=True)
    class Meta:
        db_table = u'jedi_taskparams'
class JediTasks(models.Model):
    """Main JEDI task record: status, ownership, resource requirements and
    bookkeeping timestamps (table JEDI_TASKS)."""
    jeditaskid = models.BigIntegerField(primary_key=True, db_column='JEDITASKID')
    taskname = models.CharField(max_length=384, db_column='TASKNAME', blank=True)
    status = models.CharField(max_length=192, db_column='STATUS')
    username = models.CharField(max_length=384, db_column='USERNAME')
    creationdate = models.DateTimeField(db_column='CREATIONDATE')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    reqid = models.IntegerField(null=True, db_column='REQID', blank=True)
    oldstatus = models.CharField(max_length=192, db_column='OLDSTATUS', blank=True)
    cloud = models.CharField(max_length=30, db_column='CLOUD', blank=True)
    site = models.CharField(max_length=180, db_column='SITE', blank=True)
    starttime = models.DateTimeField(null=True, db_column='STARTTIME', blank=True)
    endtime = models.DateTimeField(null=True, db_column='ENDTIME', blank=True)
    frozentime = models.DateTimeField(null=True, db_column='FROZENTIME', blank=True)
    prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
    workinggroup = models.CharField(max_length=96, db_column='WORKINGGROUP', blank=True)
    vo = models.CharField(max_length=48, db_column='VO', blank=True)
    corecount = models.IntegerField(null=True, db_column='CORECOUNT', blank=True)
    tasktype = models.CharField(max_length=192, db_column='TASKTYPE', blank=True)
    processingtype = models.CharField(max_length=192, db_column='PROCESSINGTYPE', blank=True)
    taskpriority = models.IntegerField(null=True, db_column='TASKPRIORITY', blank=True)
    currentpriority = models.IntegerField(null=True, db_column='CURRENTPRIORITY', blank=True)
    architecture = models.CharField(max_length=768, db_column='ARCHITECTURE', blank=True)
    transuses = models.CharField(max_length=192, db_column='TRANSUSES', blank=True)
    transhome = models.CharField(max_length=384, db_column='TRANSHOME', blank=True)
    transpath = models.CharField(max_length=384, db_column='TRANSPATH', blank=True)
    lockedby = models.CharField(max_length=120, db_column='LOCKEDBY', blank=True)
    lockedtime = models.DateTimeField(null=True, db_column='LOCKEDTIME', blank=True)
    termcondition = models.CharField(max_length=300, db_column='TERMCONDITION', blank=True)
    splitrule = models.CharField(max_length=300, db_column='SPLITRULE', blank=True)
    # Resource-requirement pairs: each count column has a companion unit column.
    walltime = models.IntegerField(null=True, db_column='WALLTIME', blank=True)
    walltimeunit = models.CharField(max_length=96, db_column='WALLTIMEUNIT', blank=True)
    outdiskcount = models.IntegerField(null=True, db_column='OUTDISKCOUNT', blank=True)
    outdiskunit = models.CharField(max_length=96, db_column='OUTDISKUNIT', blank=True)
    workdiskcount = models.IntegerField(null=True, db_column='WORKDISKCOUNT', blank=True)
    workdiskunit = models.CharField(max_length=96, db_column='WORKDISKUNIT', blank=True)
    ramcount = models.IntegerField(null=True, db_column='RAMCOUNT', blank=True)
    ramunit = models.CharField(max_length=96, db_column='RAMUNIT', blank=True)
    iointensity = models.IntegerField(null=True, db_column='IOINTENSITY', blank=True)
    iointensityunit = models.CharField(max_length=96, db_column='IOINTENSITYUNIT', blank=True)
    workqueue_id = models.IntegerField(null=True, db_column='WORKQUEUE_ID', blank=True)
    progress = models.IntegerField(null=True, db_column='PROGRESS', blank=True)
    failurerate = models.IntegerField(null=True, db_column='FAILURERATE', blank=True)
    errordialog = models.CharField(max_length=765, db_column='ERRORDIALOG', blank=True)
    countrygroup = models.CharField(max_length=20, db_column='COUNTRYGROUP', blank=True)
    parent_tid = models.BigIntegerField(db_column='PARENT_TID', blank=True)
    eventservice = models.IntegerField(null=True, db_column='EVENTSERVICE', blank=True)
    ticketid = models.CharField(max_length=50, db_column='TICKETID', blank=True)
    ticketsystemtype = models.CharField(max_length=16, db_column='TICKETSYSTEMTYPE', blank=True)
    statechangetime = models.DateTimeField(null=True, db_column='STATECHANGETIME', blank=True)
    superstatus = models.CharField(max_length=64, db_column='SUPERSTATUS', blank=True)
    campaign = models.CharField(max_length=72, db_column='CAMPAIGN', blank=True)
    class Meta:
        db_table = u'jedi_tasks'
class GetEventsForTask(models.Model):
    """Per-task event totals (total and remaining events)."""
    jeditaskid = models.BigIntegerField(db_column='JEDITASKID', primary_key=True)
    totevrem = models.BigIntegerField(db_column='totevrem')
    totev = models.BigIntegerField(db_column='totev')
    class Meta:
        # Fully-quoted schema-qualified name: this maps to an object in the
        # ATLAS_PANDABIGMON schema rather than the default one.
        db_table = u'"ATLAS_PANDABIGMON"."GETEVENTSFORTASK"'
class JediWorkQueue(models.Model):
    """JEDI work-queue definition: shares, ordering and selection criteria."""
    queue_id = models.IntegerField(primary_key=True, db_column='QUEUE_ID')
    queue_name = models.CharField(max_length=16, db_column='QUEUE_NAME')
    queue_type = models.CharField(max_length=16, db_column='QUEUE_TYPE')
    vo = models.CharField(max_length=16, db_column='VO')
    status = models.CharField(max_length=64, db_column='STATUS', blank=True)
    partitionid = models.IntegerField(null=True, db_column='PARTITIONID', blank=True)
    stretchable = models.IntegerField(null=True, db_column='STRETCHABLE', blank=True)
    queue_share = models.IntegerField(null=True, db_column='QUEUE_SHARE', blank=True)
    queue_order = models.IntegerField(null=True, db_column='QUEUE_ORDER', blank=True)
    criteria = models.CharField(max_length=256, db_column='CRITERIA', blank=True)
    variables = models.CharField(max_length=256, db_column='VARIABLES', blank=True)
    class Meta:
        db_table = u'jedi_work_queue'
class Jobclass(models.Model):
    """Job class definition with priority and quota limits (table JOBCLASS)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=90, db_column='NAME')
    description = models.CharField(max_length=90, db_column='DESCRIPTION')
    rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
    priority = models.IntegerField(null=True, db_column='PRIORITY', blank=True)
    # quota1/quota7/quota30: presumably 1-, 7- and 30-day quotas -- confirm units.
    quota1 = models.BigIntegerField(null=True, db_column='QUOTA1', blank=True)
    quota7 = models.BigIntegerField(null=True, db_column='QUOTA7', blank=True)
    quota30 = models.BigIntegerField(null=True, db_column='QUOTA30', blank=True)
    class Meta:
        db_table = u'jobclass'
class Jobparamstable(models.Model):
    """Job parameter text keyed by PanDA job id and modification time."""
    # NOTE(review): two primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
    jobparameters = models.TextField(db_column='JOBPARAMETERS', blank=True)
    class Meta:
        db_table = u'jobparamstable'
        unique_together = ('pandaid', 'modificationtime')
class JobparamstableArch(models.Model):
    """Archived copy of Jobparamstable rows (table JOBPARAMSTABLE_ARCH)."""
    # NOTE(review): no field is marked primary_key=True, so Django will expect an
    # implicit auto 'id' column that the legacy table may not have -- confirm.
    pandaid = models.BigIntegerField(db_column='PANDAID')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    jobparameters = models.TextField(db_column='JOBPARAMETERS', blank=True)
    class Meta:
        db_table = u'jobparamstable_arch'
class JobsStatuslog(models.Model):
    """Job status-transition log entries (table JOBS_STATUSLOG)."""
    # NOTE(review): no primary_key=True field -- Django will assume an implicit
    # auto 'id' column; verify it exists on the legacy table.
    pandaid = models.BigIntegerField(db_column='PANDAID')
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
    prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
    cloud = models.CharField(max_length=150, db_column='CLOUD', blank=True)
    computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
    modificationhost = models.CharField(max_length=384, db_column='MODIFICATIONHOST', blank=True)
    class Meta:
        db_table = u'jobs_statuslog'
class Jobsarchived4WnlistStats(models.Model):
    """Aggregated worker-node job statistics (table JOBSARCHIVED4_WNLIST_STATS)."""
    modificationtime = models.DateTimeField(primary_key=True, db_column='MODIFICATIONTIME')
    computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
    modificationhost = models.CharField(max_length=384, db_column='MODIFICATIONHOST', blank=True)
    jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
    transexitcode = models.CharField(max_length=384, db_column='TRANSEXITCODE', blank=True)
    prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
    num_of_jobs = models.IntegerField(null=True, db_column='NUM_OF_JOBS', blank=True)
    max_modificationtime = models.DateTimeField(null=True, db_column='MAX_MODIFICATIONTIME', blank=True)
    cur_date = models.DateTimeField(null=True, db_column='CUR_DATE', blank=True)
    class Meta:
        db_table = u'jobsarchived4_wnlist_stats'
class Jobsdebug(models.Model):
    """Captured stdout snippet for a job, used for debugging (table JOBSDEBUG)."""
    pandaid = models.BigIntegerField(primary_key=True, db_column='PANDAID')
    stdout = models.CharField(max_length=6144, db_column='STDOUT', blank=True)
    class Meta:
        db_table = u'jobsdebug'
class Logstable(models.Model):
    """Four log text segments stored per PanDA job (table LOGSTABLE)."""
    pandaid = models.IntegerField(primary_key=True, db_column='PANDAID')
    log1 = models.TextField(db_column='LOG1')
    log2 = models.TextField(db_column='LOG2')
    log3 = models.TextField(db_column='LOG3')
    log4 = models.TextField(db_column='LOG4')
    class Meta:
        db_table = u'logstable'
class Members(models.Model):
    """Group membership record: user name, group name and rights (table MEMBERS)."""
    # NOTE(review): two primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    uname = models.CharField(max_length=90, db_column='UNAME', primary_key=True)
    gname = models.CharField(max_length=90, db_column='GNAME', primary_key=True)
    rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
    since = models.DateTimeField(db_column='SINCE')
    class Meta:
        db_table = u'members'
        unique_together = ('uname', 'gname')
class Metatable(models.Model):
    """Free-form metadata text per job and modification time (table METATABLE)."""
    # NOTE(review): two primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME', primary_key=True)
    metadata = models.TextField(db_column='METADATA', blank=True)
    class Meta:
        db_table = u'metatable'
        unique_together = ('pandaid', 'modificationtime')
class MetatableArch(models.Model):
    """Archived copy of Metatable rows (table METATABLE_ARCH)."""
    pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
    modificationtime = models.DateTimeField(db_column='MODIFICATIONTIME')
    metadata = models.TextField(db_column='METADATA', blank=True)
    class Meta:
        db_table = u'metatable_arch'
class MvJobsactive4Stats(models.Model):
    """Aggregated statistics over active jobs, grouped by site/cloud/status
    (table MV_JOBSACTIVE4_STATS; the MV_ prefix suggests a materialized view -- confirm)."""
    id = models.BigIntegerField(primary_key=True, db_column='ID')
    cur_date = models.DateTimeField(db_column='CUR_DATE')
    cloud = models.CharField(max_length=150, db_column='CLOUD', blank=True)
    computingsite = models.CharField(max_length=384, db_column='COMPUTINGSITE', blank=True)
    countrygroup = models.CharField(max_length=60, db_column='COUNTRYGROUP', blank=True)
    workinggroup = models.CharField(max_length=60, db_column='WORKINGGROUP', blank=True)
    relocationflag = models.IntegerField(null=True, db_column='RELOCATIONFLAG', blank=True)
    jobstatus = models.CharField(max_length=45, db_column='JOBSTATUS')
    processingtype = models.CharField(max_length=192, db_column='PROCESSINGTYPE', blank=True)
    prodsourcelabel = models.CharField(max_length=60, db_column='PRODSOURCELABEL', blank=True)
    currentpriority = models.IntegerField(null=True, db_column='CURRENTPRIORITY', blank=True)
    num_of_jobs = models.IntegerField(null=True, db_column='NUM_OF_JOBS', blank=True)
    vo = models.CharField(max_length=48, db_column='VO', blank=True)
    workqueue_id = models.IntegerField(null=True, db_column='WORKQUEUE_ID', blank=True)
    class Meta:
        db_table = u'mv_jobsactive4_stats'
class OldSubcounter(models.Model):
    """Single-column legacy counter of subscription/sub IDs (table OLD_SUBCOUNTER)."""
    subid = models.BigIntegerField(primary_key=True, db_column='SUBID')
    class Meta:
        db_table = u'old_subcounter'
class Pandaconfig(models.Model):
    """Named PanDA configuration entry (table PANDACONFIG)."""
    name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
    controller = models.CharField(max_length=60, db_column='CONTROLLER')
    pathena = models.CharField(max_length=60, db_column='PATHENA', blank=True)
    class Meta:
        db_table = u'pandaconfig'
class PandaidsDeleted(models.Model):
    """PanDA job IDs whose data was deleted, with deletion timestamp."""
    pandaid = models.BigIntegerField(primary_key=True, db_column='PANDAID')
    tstamp_datadel = models.DateTimeField(null=True, db_column='TSTAMP_DATADEL', blank=True)
    class Meta:
        db_table = u'pandaids_deleted'
class PandaidsModiftime(models.Model):
    """PanDA job ID / modification-time pairs (table PANDAIDS_MODIFTIME)."""
    # NOTE(review): two primary_key=True fields emulate the composite key in
    # Meta.unique_together; Django itself only honours a single primary key -- confirm.
    pandaid = models.BigIntegerField(db_column='PANDAID', primary_key=True)
    modiftime = models.DateTimeField(db_column='MODIFTIME', primary_key=True)
    class Meta:
        db_table = u'pandaids_modiftime'
        unique_together = ('pandaid', 'modiftime')
class Pandalog(models.Model):
    """Central PanDA log record: logger name, level, source location and message."""
    bintime = models.DateTimeField(db_column='BINTIME', primary_key=True)
    name = models.CharField(max_length=90, db_column='NAME', blank=True)
    module = models.CharField(max_length=90, db_column='MODULE', blank=True)
    loguser = models.CharField(max_length=240, db_column='LOGUSER', blank=True)
    type = models.CharField(max_length=60, db_column='TYPE', blank=True)
    pid = models.BigIntegerField(db_column='PID')
    loglevel = models.IntegerField(db_column='LOGLEVEL')
    levelname = models.CharField(max_length=90, db_column='LEVELNAME', blank=True)
    # 'time' is stored as a string column, distinct from the BINTIME datetime key.
    time = models.CharField(max_length=90, db_column='TIME', blank=True)
    filename = models.CharField(max_length=300, db_column='FILENAME', blank=True)
    line = models.IntegerField(db_column='LINE')
    message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
    class Meta:
        db_table = u'pandalog'
class Passwords(models.Model):
    """Password entries keyed by numeric ID (table PASSWORDS)."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    pass_field = models.CharField(max_length=180, db_column='PASS') # Field renamed because it was a Python reserved word.
    class Meta:
        db_table = u'passwords'
class Pilotqueue(models.Model):
    """Pilot job lifecycle record: submission host, queue, state and the full set
    of state-transition timestamps (table PILOTQUEUE)."""
    # NOTE(review): jobid and nickname both carry primary_key=True, emulating the
    # composite key in Meta.unique_together; Django itself only honours one -- confirm.
    jobid = models.CharField(db_column='JOBID', max_length=100, primary_key=True)
    tpid = models.CharField(max_length=180, db_column='TPID')
    url = models.CharField(max_length=600, db_column='URL', blank=True)
    nickname = models.CharField(max_length=180, db_column='NICKNAME', primary_key=True)
    system = models.CharField(max_length=60, db_column='SYSTEM')
    user_field = models.CharField(max_length=180, db_column='USER_') # Field renamed because it was a Python reserved word.
    host = models.CharField(max_length=180, db_column='HOST')
    submithost = models.CharField(max_length=180, db_column='SUBMITHOST')
    queueid = models.CharField(max_length=180, db_column='QUEUEID')
    type = models.CharField(max_length=60, db_column='TYPE')
    pandaid = models.IntegerField(null=True, db_column='PANDAID', blank=True)
    # t* columns: timestamps of the pilot's successive lifecycle transitions.
    tcheck = models.DateTimeField(db_column='TCHECK')
    state = models.CharField(max_length=90, db_column='STATE')
    tstate = models.DateTimeField(db_column='TSTATE')
    tenter = models.DateTimeField(db_column='TENTER')
    tsubmit = models.DateTimeField(db_column='TSUBMIT')
    taccept = models.DateTimeField(db_column='TACCEPT')
    tschedule = models.DateTimeField(db_column='TSCHEDULE')
    tstart = models.DateTimeField(db_column='TSTART')
    tend = models.DateTimeField(db_column='TEND')
    tdone = models.DateTimeField(db_column='TDONE')
    tretrieve = models.DateTimeField(db_column='TRETRIEVE')
    status = models.CharField(max_length=60, db_column='STATUS')
    errcode = models.IntegerField(db_column='ERRCODE')
    errinfo = models.CharField(max_length=450, db_column='ERRINFO')
    message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
    schedd_name = models.CharField(max_length=180, db_column='SCHEDD_NAME')
    workernode = models.CharField(max_length=180, db_column='WORKERNODE')
    class Meta:
        db_table = u'pilotqueue'
        unique_together = ('jobid', 'nickname')
class PilotqueueBnl(models.Model):
    """BNL-specific variant of Pilotqueue, keyed by TPID instead of a composite key
    (table PILOTQUEUE_BNL)."""
    jobid = models.CharField(max_length=300, db_column='JOBID')
    tpid = models.CharField(max_length=180, primary_key=True, db_column='TPID')
    url = models.CharField(max_length=600, db_column='URL')
    nickname = models.CharField(max_length=180, db_column='NICKNAME')
    system = models.CharField(max_length=60, db_column='SYSTEM')
    user_field = models.CharField(max_length=180, db_column='USER_') # Field renamed because it was a Python reserved word.
    host = models.CharField(max_length=180, db_column='HOST')
    submithost = models.CharField(max_length=180, db_column='SUBMITHOST')
    schedd_name = models.CharField(max_length=180, db_column='SCHEDD_NAME')
    queueid = models.CharField(max_length=180, db_column='QUEUEID')
    type = models.CharField(max_length=60, db_column='TYPE')
    pandaid = models.IntegerField(null=True, db_column='PANDAID', blank=True)
    # t* columns: timestamps of the pilot's successive lifecycle transitions.
    tcheck = models.DateTimeField(db_column='TCHECK')
    state = models.CharField(max_length=90, db_column='STATE')
    tstate = models.DateTimeField(db_column='TSTATE')
    tenter = models.DateTimeField(db_column='TENTER')
    tsubmit = models.DateTimeField(db_column='TSUBMIT')
    taccept = models.DateTimeField(db_column='TACCEPT')
    tschedule = models.DateTimeField(db_column='TSCHEDULE')
    tstart = models.DateTimeField(db_column='TSTART')
    tend = models.DateTimeField(db_column='TEND')
    tdone = models.DateTimeField(db_column='TDONE')
    tretrieve = models.DateTimeField(db_column='TRETRIEVE')
    status = models.CharField(max_length=60, db_column='STATUS')
    errcode = models.IntegerField(db_column='ERRCODE')
    errinfo = models.CharField(max_length=450, db_column='ERRINFO')
    message = models.CharField(max_length=12000, db_column='MESSAGE', blank=True)
    workernode = models.CharField(max_length=180, db_column='WORKERNODE')
    class Meta:
        db_table = u'pilotqueue_bnl'
class Pilottoken(models.Model):
    """Authentication token issued to a pilot scheduler, with usage count and
    expiry (table PILOTTOKEN)."""
    token = models.CharField(max_length=192, primary_key=True, db_column='TOKEN')
    schedulerhost = models.CharField(max_length=300, db_column='SCHEDULERHOST', blank=True)
    scheduleruser = models.CharField(max_length=450, db_column='SCHEDULERUSER', blank=True)
    usages = models.IntegerField(db_column='USAGES')
    created = models.DateTimeField(db_column='CREATED')
    expires = models.DateTimeField(db_column='EXPIRES')
    schedulerid = models.CharField(max_length=240, db_column='SCHEDULERID', blank=True)
    class Meta:
        db_table = u'pilottoken'
class Pilottype(models.Model):
    """Named pilot type with its launcher script and URL (table PILOTTYPE)."""
    name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
    script = models.CharField(max_length=180, db_column='SCRIPT')
    url = models.CharField(max_length=450, db_column='URL')
    system = models.CharField(max_length=180, db_column='SYSTEM')
    class Meta:
        db_table = u'pilottype'
class PoolCollLock(models.Model):
    """Lock record for a POOL collection: holder info, lock type and timestamp
    (table POOL_COLL_LOCK)."""
    id = models.CharField(max_length=150, primary_key=True, db_column='ID')
    collection = models.CharField(max_length=1500, db_column='COLLECTION', blank=True)
    client_info = models.CharField(max_length=1500, db_column='CLIENT_INFO', blank=True)
    locktype = models.CharField(max_length=60, db_column='LOCKTYPE', blank=True)
    timestamp = models.DateTimeField(null=True, db_column='TIMESTAMP', blank=True)
    class Meta:
        db_table = u'pool_coll_lock'
class PoolCollectionData(models.Model):
    """Row of the wide, generated ``pool_collection_data`` table.

    Auto-generated (inspectdb style) mapping of a POOL collection data
    table: an integer primary key ``id``, two OID pairs, two further
    per-variable OID pairs, and 260 generic payload columns ``VAR_3`` ..
    ``VAR_263``.  The columns carry no semantic names here — only their
    storage types vary (runs of integer DecimalField with max_digits
    11, 6 or 2, interleaved with FloatField runs).  The meaning of each
    VAR_n is defined by whatever writes this table, not visible from
    this model.  Do not reorder or rename fields: the declarations are
    the DB mapping.
    """
    id = models.DecimalField(decimal_places=0, primary_key=True, db_column='ID', max_digits=11)
    # Object-ID pairs (collection-level, then per-variable).
    oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_1', blank=True)
    oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_2', blank=True)
    var_1_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_1', blank=True)
    var_1_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_2', blank=True)
    var_2_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_1', blank=True)
    var_2_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_2', blank=True)
    # Generic payload columns: mixed integer (DecimalField) / float runs.
    var_3 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_3', blank=True)
    var_4 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_4', blank=True)
    var_5 = models.FloatField(null=True, db_column='VAR_5', blank=True)
    var_6 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_6', blank=True)
    var_7 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_7', blank=True)
    var_8 = models.FloatField(null=True, db_column='VAR_8', blank=True)
    var_9 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_9', blank=True)
    var_10 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_10', blank=True)
    var_11 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_11', blank=True)
    var_12 = models.FloatField(null=True, db_column='VAR_12', blank=True)
    var_13 = models.FloatField(null=True, db_column='VAR_13', blank=True)
    var_14 = models.FloatField(null=True, db_column='VAR_14', blank=True)
    # VAR_15..VAR_18: tiny integers (max_digits=2).
    var_15 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_15', blank=True)
    var_16 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_16', blank=True)
    var_17 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_17', blank=True)
    var_18 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_18', blank=True)
    var_19 = models.FloatField(null=True, db_column='VAR_19', blank=True)
    var_20 = models.FloatField(null=True, db_column='VAR_20', blank=True)
    var_21 = models.FloatField(null=True, db_column='VAR_21', blank=True)
    var_22 = models.FloatField(null=True, db_column='VAR_22', blank=True)
    var_23 = models.FloatField(null=True, db_column='VAR_23', blank=True)
    var_24 = models.FloatField(null=True, db_column='VAR_24', blank=True)
    var_25 = models.FloatField(null=True, db_column='VAR_25', blank=True)
    var_26 = models.FloatField(null=True, db_column='VAR_26', blank=True)
    var_27 = models.FloatField(null=True, db_column='VAR_27', blank=True)
    var_28 = models.FloatField(null=True, db_column='VAR_28', blank=True)
    var_29 = models.FloatField(null=True, db_column='VAR_29', blank=True)
    var_30 = models.FloatField(null=True, db_column='VAR_30', blank=True)
    var_31 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_31', blank=True)
    var_32 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_32', blank=True)
    var_33 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_33', blank=True)
    var_34 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_34', blank=True)
    var_35 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_35', blank=True)
    var_36 = models.FloatField(null=True, db_column='VAR_36', blank=True)
    var_37 = models.FloatField(null=True, db_column='VAR_37', blank=True)
    var_38 = models.FloatField(null=True, db_column='VAR_38', blank=True)
    var_39 = models.FloatField(null=True, db_column='VAR_39', blank=True)
    var_40 = models.FloatField(null=True, db_column='VAR_40', blank=True)
    var_41 = models.FloatField(null=True, db_column='VAR_41', blank=True)
    var_42 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_42', blank=True)
    var_43 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_43', blank=True)
    var_44 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_44', blank=True)
    var_45 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_45', blank=True)
    var_46 = models.FloatField(null=True, db_column='VAR_46', blank=True)
    var_47 = models.FloatField(null=True, db_column='VAR_47', blank=True)
    var_48 = models.FloatField(null=True, db_column='VAR_48', blank=True)
    var_49 = models.FloatField(null=True, db_column='VAR_49', blank=True)
    var_50 = models.FloatField(null=True, db_column='VAR_50', blank=True)
    var_51 = models.FloatField(null=True, db_column='VAR_51', blank=True)
    var_52 = models.FloatField(null=True, db_column='VAR_52', blank=True)
    var_53 = models.FloatField(null=True, db_column='VAR_53', blank=True)
    var_54 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_54', blank=True)
    var_55 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_55', blank=True)
    var_56 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_56', blank=True)
    var_57 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_57', blank=True)
    var_58 = models.FloatField(null=True, db_column='VAR_58', blank=True)
    var_59 = models.FloatField(null=True, db_column='VAR_59', blank=True)
    var_60 = models.FloatField(null=True, db_column='VAR_60', blank=True)
    var_61 = models.FloatField(null=True, db_column='VAR_61', blank=True)
    var_62 = models.FloatField(null=True, db_column='VAR_62', blank=True)
    var_63 = models.FloatField(null=True, db_column='VAR_63', blank=True)
    var_64 = models.FloatField(null=True, db_column='VAR_64', blank=True)
    var_65 = models.FloatField(null=True, db_column='VAR_65', blank=True)
    var_66 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_66', blank=True)
    var_67 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_67', blank=True)
    var_68 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_68', blank=True)
    var_69 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_69', blank=True)
    var_70 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_70', blank=True)
    var_71 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_71', blank=True)
    var_72 = models.FloatField(null=True, db_column='VAR_72', blank=True)
    var_73 = models.FloatField(null=True, db_column='VAR_73', blank=True)
    var_74 = models.FloatField(null=True, db_column='VAR_74', blank=True)
    var_75 = models.FloatField(null=True, db_column='VAR_75', blank=True)
    var_76 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_76', blank=True)
    var_77 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_77', blank=True)
    # VAR_78..VAR_106: contiguous float run.
    var_78 = models.FloatField(null=True, db_column='VAR_78', blank=True)
    var_79 = models.FloatField(null=True, db_column='VAR_79', blank=True)
    var_80 = models.FloatField(null=True, db_column='VAR_80', blank=True)
    var_81 = models.FloatField(null=True, db_column='VAR_81', blank=True)
    var_82 = models.FloatField(null=True, db_column='VAR_82', blank=True)
    var_83 = models.FloatField(null=True, db_column='VAR_83', blank=True)
    var_84 = models.FloatField(null=True, db_column='VAR_84', blank=True)
    var_85 = models.FloatField(null=True, db_column='VAR_85', blank=True)
    var_86 = models.FloatField(null=True, db_column='VAR_86', blank=True)
    var_87 = models.FloatField(null=True, db_column='VAR_87', blank=True)
    var_88 = models.FloatField(null=True, db_column='VAR_88', blank=True)
    var_89 = models.FloatField(null=True, db_column='VAR_89', blank=True)
    var_90 = models.FloatField(null=True, db_column='VAR_90', blank=True)
    var_91 = models.FloatField(null=True, db_column='VAR_91', blank=True)
    var_92 = models.FloatField(null=True, db_column='VAR_92', blank=True)
    var_93 = models.FloatField(null=True, db_column='VAR_93', blank=True)
    var_94 = models.FloatField(null=True, db_column='VAR_94', blank=True)
    var_95 = models.FloatField(null=True, db_column='VAR_95', blank=True)
    var_96 = models.FloatField(null=True, db_column='VAR_96', blank=True)
    var_97 = models.FloatField(null=True, db_column='VAR_97', blank=True)
    var_98 = models.FloatField(null=True, db_column='VAR_98', blank=True)
    var_99 = models.FloatField(null=True, db_column='VAR_99', blank=True)
    var_100 = models.FloatField(null=True, db_column='VAR_100', blank=True)
    var_101 = models.FloatField(null=True, db_column='VAR_101', blank=True)
    var_102 = models.FloatField(null=True, db_column='VAR_102', blank=True)
    var_103 = models.FloatField(null=True, db_column='VAR_103', blank=True)
    var_104 = models.FloatField(null=True, db_column='VAR_104', blank=True)
    var_105 = models.FloatField(null=True, db_column='VAR_105', blank=True)
    var_106 = models.FloatField(null=True, db_column='VAR_106', blank=True)
    var_107 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_107', blank=True)
    var_108 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_108', blank=True)
    var_109 = models.FloatField(null=True, db_column='VAR_109', blank=True)
    var_110 = models.FloatField(null=True, db_column='VAR_110', blank=True)
    var_111 = models.FloatField(null=True, db_column='VAR_111', blank=True)
    # VAR_112..VAR_200: contiguous integer run (max_digits=11).
    var_112 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_112', blank=True)
    var_113 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_113', blank=True)
    var_114 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_114', blank=True)
    var_115 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_115', blank=True)
    var_116 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_116', blank=True)
    var_117 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_117', blank=True)
    var_118 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_118', blank=True)
    var_119 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_119', blank=True)
    var_120 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_120', blank=True)
    var_121 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_121', blank=True)
    var_122 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_122', blank=True)
    var_123 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_123', blank=True)
    var_124 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_124', blank=True)
    var_125 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_125', blank=True)
    var_126 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_126', blank=True)
    var_127 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_127', blank=True)
    var_128 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_128', blank=True)
    var_129 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_129', blank=True)
    var_130 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_130', blank=True)
    var_131 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_131', blank=True)
    var_132 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_132', blank=True)
    var_133 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_133', blank=True)
    var_134 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_134', blank=True)
    var_135 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_135', blank=True)
    var_136 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_136', blank=True)
    var_137 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_137', blank=True)
    var_138 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_138', blank=True)
    var_139 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_139', blank=True)
    var_140 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_140', blank=True)
    var_141 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_141', blank=True)
    var_142 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_142', blank=True)
    var_143 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_143', blank=True)
    var_144 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_144', blank=True)
    var_145 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_145', blank=True)
    var_146 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_146', blank=True)
    var_147 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_147', blank=True)
    var_148 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_148', blank=True)
    var_149 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_149', blank=True)
    var_150 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_150', blank=True)
    var_151 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_151', blank=True)
    var_152 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_152', blank=True)
    var_153 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_153', blank=True)
    var_154 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_154', blank=True)
    var_155 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_155', blank=True)
    var_156 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_156', blank=True)
    var_157 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_157', blank=True)
    var_158 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_158', blank=True)
    var_159 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_159', blank=True)
    var_160 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_160', blank=True)
    var_161 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_161', blank=True)
    var_162 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_162', blank=True)
    var_163 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_163', blank=True)
    var_164 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_164', blank=True)
    var_165 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_165', blank=True)
    var_166 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_166', blank=True)
    var_167 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_167', blank=True)
    var_168 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_168', blank=True)
    var_169 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_169', blank=True)
    var_170 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_170', blank=True)
    var_171 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_171', blank=True)
    var_172 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_172', blank=True)
    var_173 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_173', blank=True)
    var_174 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_174', blank=True)
    var_175 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_175', blank=True)
    var_176 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_176', blank=True)
    var_177 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_177', blank=True)
    var_178 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_178', blank=True)
    var_179 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_179', blank=True)
    var_180 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_180', blank=True)
    var_181 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_181', blank=True)
    var_182 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_182', blank=True)
    var_183 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_183', blank=True)
    var_184 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_184', blank=True)
    var_185 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_185', blank=True)
    var_186 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_186', blank=True)
    var_187 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_187', blank=True)
    var_188 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_188', blank=True)
    var_189 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_189', blank=True)
    var_190 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_190', blank=True)
    var_191 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_191', blank=True)
    var_192 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_192', blank=True)
    var_193 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_193', blank=True)
    var_194 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_194', blank=True)
    var_195 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_195', blank=True)
    var_196 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_196', blank=True)
    var_197 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_197', blank=True)
    var_198 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_198', blank=True)
    var_199 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_199', blank=True)
    var_200 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_200', blank=True)
    # VAR_201..VAR_249: narrower integer run (max_digits=6).
    var_201 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_201', blank=True)
    var_202 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_202', blank=True)
    var_203 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_203', blank=True)
    var_204 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_204', blank=True)
    var_205 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_205', blank=True)
    var_206 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_206', blank=True)
    var_207 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_207', blank=True)
    var_208 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_208', blank=True)
    var_209 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_209', blank=True)
    var_210 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_210', blank=True)
    var_211 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_211', blank=True)
    var_212 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_212', blank=True)
    var_213 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_213', blank=True)
    var_214 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_214', blank=True)
    var_215 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_215', blank=True)
    var_216 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_216', blank=True)
    var_217 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_217', blank=True)
    var_218 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_218', blank=True)
    var_219 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_219', blank=True)
    var_220 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_220', blank=True)
    var_221 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_221', blank=True)
    var_222 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_222', blank=True)
    var_223 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_223', blank=True)
    var_224 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_224', blank=True)
    var_225 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_225', blank=True)
    var_226 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_226', blank=True)
    var_227 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_227', blank=True)
    var_228 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_228', blank=True)
    var_229 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_229', blank=True)
    var_230 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_230', blank=True)
    var_231 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_231', blank=True)
    var_232 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_232', blank=True)
    var_233 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_233', blank=True)
    var_234 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_234', blank=True)
    var_235 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_235', blank=True)
    var_236 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_236', blank=True)
    var_237 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_237', blank=True)
    var_238 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_238', blank=True)
    var_239 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_239', blank=True)
    var_240 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_240', blank=True)
    var_241 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_241', blank=True)
    var_242 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_242', blank=True)
    var_243 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_243', blank=True)
    var_244 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_244', blank=True)
    var_245 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_245', blank=True)
    var_246 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_246', blank=True)
    var_247 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_247', blank=True)
    var_248 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_248', blank=True)
    var_249 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_249', blank=True)
    # VAR_250..VAR_262: back to wide integers (max_digits=11); VAR_263 is a float.
    var_250 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_250', blank=True)
    var_251 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_251', blank=True)
    var_252 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_252', blank=True)
    var_253 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_253', blank=True)
    var_254 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_254', blank=True)
    var_255 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_255', blank=True)
    var_256 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_256', blank=True)
    var_257 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_257', blank=True)
    var_258 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_258', blank=True)
    var_259 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_259', blank=True)
    var_260 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_260', blank=True)
    var_261 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_261', blank=True)
    var_262 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_262', blank=True)
    var_263 = models.FloatField(null=True, db_column='VAR_263', blank=True)
    class Meta:
        db_table = u'pool_collection_data'
class PoolCollectionData1(models.Model):
    """ORM mapping for the POOL collection payload table ``pool_collection_data_1``.

    Auto-generated (inspectdb-style) model. The schema is positional: besides
    the surrogate row id and the object-identifier pairs, it carries opaque
    attribute columns ``VAR_3`` .. ``VAR_263``. The meaning, type and position
    of each VAR column are described externally (see ``PoolCollectionsDesc`` /
    ``pool_collections_desc``), so no semantics are asserted here.
    """
    # Surrogate row key.
    id = models.DecimalField(decimal_places=0, primary_key=True, db_column='ID', max_digits=11)
    # Object-identifier pairs referencing stored objects.
    oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_1', blank=True)
    oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='OID_2', blank=True)
    var_1_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_1', blank=True)
    var_1_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_1_OID_2', blank=True)
    var_2_oid_1 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_1', blank=True)
    var_2_oid_2 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_2_OID_2', blank=True)
    # Opaque attribute columns; mixture of integer (DecimalField, scale 0)
    # and floating-point columns as dictated by the legacy table.
    var_3 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_3', blank=True)
    var_4 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_4', blank=True)
    var_5 = models.FloatField(null=True, db_column='VAR_5', blank=True)
    var_6 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_6', blank=True)
    var_7 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_7', blank=True)
    var_8 = models.FloatField(null=True, db_column='VAR_8', blank=True)
    var_9 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_9', blank=True)
    var_10 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_10', blank=True)
    var_11 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_11', blank=True)
    var_12 = models.FloatField(null=True, db_column='VAR_12', blank=True)
    var_13 = models.FloatField(null=True, db_column='VAR_13', blank=True)
    var_14 = models.FloatField(null=True, db_column='VAR_14', blank=True)
    # VAR_15..VAR_18 are narrow integer columns (max two digits).
    var_15 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_15', blank=True)
    var_16 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_16', blank=True)
    var_17 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_17', blank=True)
    var_18 = models.DecimalField(decimal_places=0, null=True, max_digits=2, db_column='VAR_18', blank=True)
    var_19 = models.FloatField(null=True, db_column='VAR_19', blank=True)
    var_20 = models.FloatField(null=True, db_column='VAR_20', blank=True)
    var_21 = models.FloatField(null=True, db_column='VAR_21', blank=True)
    var_22 = models.FloatField(null=True, db_column='VAR_22', blank=True)
    var_23 = models.FloatField(null=True, db_column='VAR_23', blank=True)
    var_24 = models.FloatField(null=True, db_column='VAR_24', blank=True)
    var_25 = models.FloatField(null=True, db_column='VAR_25', blank=True)
    var_26 = models.FloatField(null=True, db_column='VAR_26', blank=True)
    var_27 = models.FloatField(null=True, db_column='VAR_27', blank=True)
    var_28 = models.FloatField(null=True, db_column='VAR_28', blank=True)
    var_29 = models.FloatField(null=True, db_column='VAR_29', blank=True)
    var_30 = models.FloatField(null=True, db_column='VAR_30', blank=True)
    var_31 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_31', blank=True)
    var_32 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_32', blank=True)
    var_33 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_33', blank=True)
    var_34 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_34', blank=True)
    var_35 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_35', blank=True)
    var_36 = models.FloatField(null=True, db_column='VAR_36', blank=True)
    var_37 = models.FloatField(null=True, db_column='VAR_37', blank=True)
    var_38 = models.FloatField(null=True, db_column='VAR_38', blank=True)
    var_39 = models.FloatField(null=True, db_column='VAR_39', blank=True)
    var_40 = models.FloatField(null=True, db_column='VAR_40', blank=True)
    var_41 = models.FloatField(null=True, db_column='VAR_41', blank=True)
    var_42 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_42', blank=True)
    var_43 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_43', blank=True)
    var_44 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_44', blank=True)
    var_45 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_45', blank=True)
    var_46 = models.FloatField(null=True, db_column='VAR_46', blank=True)
    var_47 = models.FloatField(null=True, db_column='VAR_47', blank=True)
    var_48 = models.FloatField(null=True, db_column='VAR_48', blank=True)
    var_49 = models.FloatField(null=True, db_column='VAR_49', blank=True)
    var_50 = models.FloatField(null=True, db_column='VAR_50', blank=True)
    var_51 = models.FloatField(null=True, db_column='VAR_51', blank=True)
    var_52 = models.FloatField(null=True, db_column='VAR_52', blank=True)
    var_53 = models.FloatField(null=True, db_column='VAR_53', blank=True)
    var_54 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_54', blank=True)
    var_55 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_55', blank=True)
    var_56 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_56', blank=True)
    var_57 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_57', blank=True)
    var_58 = models.FloatField(null=True, db_column='VAR_58', blank=True)
    var_59 = models.FloatField(null=True, db_column='VAR_59', blank=True)
    var_60 = models.FloatField(null=True, db_column='VAR_60', blank=True)
    var_61 = models.FloatField(null=True, db_column='VAR_61', blank=True)
    var_62 = models.FloatField(null=True, db_column='VAR_62', blank=True)
    var_63 = models.FloatField(null=True, db_column='VAR_63', blank=True)
    var_64 = models.FloatField(null=True, db_column='VAR_64', blank=True)
    var_65 = models.FloatField(null=True, db_column='VAR_65', blank=True)
    var_66 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_66', blank=True)
    var_67 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_67', blank=True)
    var_68 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_68', blank=True)
    var_69 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_69', blank=True)
    var_70 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_70', blank=True)
    var_71 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_71', blank=True)
    var_72 = models.FloatField(null=True, db_column='VAR_72', blank=True)
    var_73 = models.FloatField(null=True, db_column='VAR_73', blank=True)
    var_74 = models.FloatField(null=True, db_column='VAR_74', blank=True)
    var_75 = models.FloatField(null=True, db_column='VAR_75', blank=True)
    var_76 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_76', blank=True)
    var_77 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_77', blank=True)
    var_78 = models.FloatField(null=True, db_column='VAR_78', blank=True)
    var_79 = models.FloatField(null=True, db_column='VAR_79', blank=True)
    var_80 = models.FloatField(null=True, db_column='VAR_80', blank=True)
    var_81 = models.FloatField(null=True, db_column='VAR_81', blank=True)
    var_82 = models.FloatField(null=True, db_column='VAR_82', blank=True)
    var_83 = models.FloatField(null=True, db_column='VAR_83', blank=True)
    var_84 = models.FloatField(null=True, db_column='VAR_84', blank=True)
    var_85 = models.FloatField(null=True, db_column='VAR_85', blank=True)
    var_86 = models.FloatField(null=True, db_column='VAR_86', blank=True)
    var_87 = models.FloatField(null=True, db_column='VAR_87', blank=True)
    var_88 = models.FloatField(null=True, db_column='VAR_88', blank=True)
    var_89 = models.FloatField(null=True, db_column='VAR_89', blank=True)
    var_90 = models.FloatField(null=True, db_column='VAR_90', blank=True)
    var_91 = models.FloatField(null=True, db_column='VAR_91', blank=True)
    var_92 = models.FloatField(null=True, db_column='VAR_92', blank=True)
    var_93 = models.FloatField(null=True, db_column='VAR_93', blank=True)
    var_94 = models.FloatField(null=True, db_column='VAR_94', blank=True)
    var_95 = models.FloatField(null=True, db_column='VAR_95', blank=True)
    var_96 = models.FloatField(null=True, db_column='VAR_96', blank=True)
    var_97 = models.FloatField(null=True, db_column='VAR_97', blank=True)
    var_98 = models.FloatField(null=True, db_column='VAR_98', blank=True)
    var_99 = models.FloatField(null=True, db_column='VAR_99', blank=True)
    var_100 = models.FloatField(null=True, db_column='VAR_100', blank=True)
    var_101 = models.FloatField(null=True, db_column='VAR_101', blank=True)
    var_102 = models.FloatField(null=True, db_column='VAR_102', blank=True)
    var_103 = models.FloatField(null=True, db_column='VAR_103', blank=True)
    var_104 = models.FloatField(null=True, db_column='VAR_104', blank=True)
    var_105 = models.FloatField(null=True, db_column='VAR_105', blank=True)
    var_106 = models.FloatField(null=True, db_column='VAR_106', blank=True)
    var_107 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_107', blank=True)
    var_108 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_108', blank=True)
    var_109 = models.FloatField(null=True, db_column='VAR_109', blank=True)
    var_110 = models.FloatField(null=True, db_column='VAR_110', blank=True)
    var_111 = models.FloatField(null=True, db_column='VAR_111', blank=True)
    # VAR_112..VAR_200: integer columns, up to eleven digits.
    var_112 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_112', blank=True)
    var_113 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_113', blank=True)
    var_114 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_114', blank=True)
    var_115 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_115', blank=True)
    var_116 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_116', blank=True)
    var_117 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_117', blank=True)
    var_118 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_118', blank=True)
    var_119 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_119', blank=True)
    var_120 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_120', blank=True)
    var_121 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_121', blank=True)
    var_122 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_122', blank=True)
    var_123 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_123', blank=True)
    var_124 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_124', blank=True)
    var_125 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_125', blank=True)
    var_126 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_126', blank=True)
    var_127 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_127', blank=True)
    var_128 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_128', blank=True)
    var_129 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_129', blank=True)
    var_130 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_130', blank=True)
    var_131 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_131', blank=True)
    var_132 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_132', blank=True)
    var_133 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_133', blank=True)
    var_134 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_134', blank=True)
    var_135 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_135', blank=True)
    var_136 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_136', blank=True)
    var_137 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_137', blank=True)
    var_138 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_138', blank=True)
    var_139 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_139', blank=True)
    var_140 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_140', blank=True)
    var_141 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_141', blank=True)
    var_142 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_142', blank=True)
    var_143 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_143', blank=True)
    var_144 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_144', blank=True)
    var_145 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_145', blank=True)
    var_146 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_146', blank=True)
    var_147 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_147', blank=True)
    var_148 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_148', blank=True)
    var_149 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_149', blank=True)
    var_150 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_150', blank=True)
    var_151 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_151', blank=True)
    var_152 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_152', blank=True)
    var_153 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_153', blank=True)
    var_154 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_154', blank=True)
    var_155 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_155', blank=True)
    var_156 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_156', blank=True)
    var_157 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_157', blank=True)
    var_158 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_158', blank=True)
    var_159 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_159', blank=True)
    var_160 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_160', blank=True)
    var_161 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_161', blank=True)
    var_162 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_162', blank=True)
    var_163 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_163', blank=True)
    var_164 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_164', blank=True)
    var_165 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_165', blank=True)
    var_166 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_166', blank=True)
    var_167 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_167', blank=True)
    var_168 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_168', blank=True)
    var_169 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_169', blank=True)
    var_170 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_170', blank=True)
    var_171 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_171', blank=True)
    var_172 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_172', blank=True)
    var_173 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_173', blank=True)
    var_174 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_174', blank=True)
    var_175 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_175', blank=True)
    var_176 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_176', blank=True)
    var_177 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_177', blank=True)
    var_178 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_178', blank=True)
    var_179 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_179', blank=True)
    var_180 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_180', blank=True)
    var_181 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_181', blank=True)
    var_182 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_182', blank=True)
    var_183 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_183', blank=True)
    var_184 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_184', blank=True)
    var_185 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_185', blank=True)
    var_186 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_186', blank=True)
    var_187 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_187', blank=True)
    var_188 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_188', blank=True)
    var_189 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_189', blank=True)
    var_190 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_190', blank=True)
    var_191 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_191', blank=True)
    var_192 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_192', blank=True)
    var_193 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_193', blank=True)
    var_194 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_194', blank=True)
    var_195 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_195', blank=True)
    var_196 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_196', blank=True)
    var_197 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_197', blank=True)
    var_198 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_198', blank=True)
    var_199 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_199', blank=True)
    var_200 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_200', blank=True)
    # VAR_201..VAR_249: narrower integer columns (max six digits).
    var_201 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_201', blank=True)
    var_202 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_202', blank=True)
    var_203 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_203', blank=True)
    var_204 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_204', blank=True)
    var_205 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_205', blank=True)
    var_206 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_206', blank=True)
    var_207 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_207', blank=True)
    var_208 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_208', blank=True)
    var_209 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_209', blank=True)
    var_210 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_210', blank=True)
    var_211 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_211', blank=True)
    var_212 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_212', blank=True)
    var_213 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_213', blank=True)
    var_214 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_214', blank=True)
    var_215 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_215', blank=True)
    var_216 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_216', blank=True)
    var_217 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_217', blank=True)
    var_218 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_218', blank=True)
    var_219 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_219', blank=True)
    var_220 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_220', blank=True)
    var_221 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_221', blank=True)
    var_222 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_222', blank=True)
    var_223 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_223', blank=True)
    var_224 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_224', blank=True)
    var_225 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_225', blank=True)
    var_226 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_226', blank=True)
    var_227 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_227', blank=True)
    var_228 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_228', blank=True)
    var_229 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_229', blank=True)
    var_230 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_230', blank=True)
    var_231 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_231', blank=True)
    var_232 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_232', blank=True)
    var_233 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_233', blank=True)
    var_234 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_234', blank=True)
    var_235 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_235', blank=True)
    var_236 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_236', blank=True)
    var_237 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_237', blank=True)
    var_238 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_238', blank=True)
    var_239 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_239', blank=True)
    var_240 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_240', blank=True)
    var_241 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_241', blank=True)
    var_242 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_242', blank=True)
    var_243 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_243', blank=True)
    var_244 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_244', blank=True)
    var_245 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_245', blank=True)
    var_246 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_246', blank=True)
    var_247 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_247', blank=True)
    var_248 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_248', blank=True)
    var_249 = models.DecimalField(decimal_places=0, null=True, max_digits=6, db_column='VAR_249', blank=True)
    # VAR_250..VAR_262: back to eleven-digit integers; VAR_263 is a float.
    var_250 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_250', blank=True)
    var_251 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_251', blank=True)
    var_252 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_252', blank=True)
    var_253 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_253', blank=True)
    var_254 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_254', blank=True)
    var_255 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_255', blank=True)
    var_256 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_256', blank=True)
    var_257 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_257', blank=True)
    var_258 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_258', blank=True)
    var_259 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_259', blank=True)
    var_260 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_260', blank=True)
    var_261 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_261', blank=True)
    var_262 = models.DecimalField(decimal_places=0, null=True, max_digits=11, db_column='VAR_262', blank=True)
    var_263 = models.FloatField(null=True, db_column='VAR_263', blank=True)

    class Meta:
        # Unmanaged-style mapping onto the legacy table name.
        db_table = u'pool_collection_data_1'
class PoolCollections(models.Model):
    """Registry of POOL collections.

    One row per named collection, recording the backing data/links table
    names, write/delete counters, and the optional parent/child linkage.
    """
    collection_name = models.CharField(primary_key=True, max_length=255, db_column='COLLECTION_NAME')
    data_table_name = models.CharField(blank=True, max_length=1200, db_column='DATA_TABLE_NAME')
    links_table_name = models.CharField(blank=True, max_length=1200, db_column='LINKS_TABLE_NAME')
    # Bookkeeping counters (integer-valued: scale 0).
    records_written = models.DecimalField(blank=True, null=True, decimal_places=0, max_digits=11, db_column='RECORDS_WRITTEN')
    records_deleted = models.DecimalField(blank=True, null=True, decimal_places=0, max_digits=11, db_column='RECORDS_DELETED')
    child_collection_name = models.CharField(blank=True, max_length=1200, db_column='CHILD_COLLECTION_NAME')
    foreign_key_name = models.CharField(blank=True, max_length=1200, db_column='FOREIGN_KEY_NAME')

    class Meta:
        db_table = u'pool_collections'
class PoolCollectionsDesc(models.Model):
    """Per-variable description of a POOL collection's columns.

    Declares, for each collection, the name, type, size and position of every
    variable stored in the corresponding ``pool_collection_data*`` table.
    """
    collection_name = models.CharField(primary_key=True, max_length=255, db_column='COLLECTION_NAME')
    variable_name = models.CharField(blank=True, max_length=1200, db_column='VARIABLE_NAME')
    variable_type = models.CharField(blank=True, max_length=1200, db_column='VARIABLE_TYPE')
    variable_maximum_size = models.DecimalField(blank=True, null=True, decimal_places=0, max_digits=11, db_column='VARIABLE_MAXIMUM_SIZE')
    # Stored as a string flag rather than a boolean column.
    variable_size_is_fixed = models.CharField(blank=True, max_length=15, db_column='VARIABLE_SIZE_IS_FIXED')
    variable_position = models.DecimalField(blank=True, null=True, decimal_places=0, max_digits=11, db_column='VARIABLE_POSITION')
    variable_annotation = models.CharField(blank=True, max_length=12000, db_column='VARIABLE_ANNOTATION')

    class Meta:
        db_table = u'pool_collections_desc'
class ProdsysComm(models.Model):
    """Production-system communication record: a command issued for a task."""
    comm_task = models.BigIntegerField(primary_key=True, db_column='COMM_TASK')
    comm_meta = models.BigIntegerField(blank=True, null=True, db_column='COMM_META')
    comm_owner = models.CharField(blank=True, max_length=48, db_column='COMM_OWNER')
    comm_cmd = models.CharField(blank=True, max_length=768, db_column='COMM_CMD')
    # Timestamp kept as a raw integer column in the legacy schema.
    comm_ts = models.BigIntegerField(blank=True, null=True, db_column='COMM_TS')

    class Meta:
        db_table = u'prodsys_comm'
class Productiondatasets(models.Model):
    """Catalogue entry for a production dataset (name, size, provenance)."""
    name = models.CharField(primary_key=True, max_length=255, db_column='NAME')
    version = models.IntegerField(blank=True, null=True, db_column='VERSION')
    vuid = models.CharField(max_length=120, db_column='VUID')
    # Aggregate size/content counters.
    files = models.IntegerField(blank=True, null=True, db_column='FILES')
    gb = models.IntegerField(blank=True, null=True, db_column='GB')
    events = models.IntegerField(blank=True, null=True, db_column='EVENTS')
    site = models.CharField(blank=True, max_length=30, db_column='SITE')
    sw_release = models.CharField(blank=True, max_length=60, db_column='SW_RELEASE')
    geometry = models.CharField(blank=True, max_length=60, db_column='GEOMETRY')
    # Producing job identifiers.
    jobid = models.IntegerField(blank=True, null=True, db_column='JOBID')
    pandaid = models.IntegerField(blank=True, null=True, db_column='PANDAID')
    prodtime = models.DateTimeField(blank=True, null=True, db_column='PRODTIME')
    # NOTE(review): integer epoch-style column alongside PRODTIME — confirm units.
    timestamp = models.IntegerField(blank=True, null=True, db_column='TIMESTAMP')

    class Meta:
        db_table = u'productiondatasets'
class Proxykey(models.Model):
    """Stored proxy-credential record keyed by an integer id."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    dn = models.CharField(max_length=300, db_column='DN')
    credname = models.CharField(max_length=120, db_column='CREDNAME')
    # Credential lifetime window.
    created = models.DateTimeField(db_column='CREATED')
    expires = models.DateTimeField(db_column='EXPIRES')
    origin = models.CharField(max_length=240, db_column='ORIGIN')
    myproxy = models.CharField(max_length=240, db_column='MYPROXY')

    class Meta:
        db_table = u'proxykey'
class Redirect(models.Model):
    """Service-redirection entry: one monitored URL with health/ranking state."""
    service = models.CharField(max_length=30, db_column='SERVICE')
    # NOTE(review): field name shadows the builtin `type`; kept for API
    # compatibility with existing callers and the legacy column name.
    type = models.CharField(max_length=30, db_column='TYPE')
    site = models.CharField(max_length=30, db_column='SITE')
    description = models.CharField(max_length=120, db_column='DESCRIPTION')
    url = models.CharField(primary_key=True, max_length=250, db_column='URL')
    testurl = models.CharField(blank=True, max_length=250, db_column='TESTURL')
    # Probe results.
    response = models.CharField(max_length=30, db_column='RESPONSE')
    aliveresponse = models.CharField(max_length=30, db_column='ALIVERESPONSE')
    responsetime = models.IntegerField(blank=True, null=True, db_column='RESPONSETIME')
    # Selection metrics.
    rank = models.IntegerField(blank=True, null=True, db_column='RANK')
    performance = models.IntegerField(blank=True, null=True, db_column='PERFORMANCE')
    status = models.CharField(max_length=30, db_column='STATUS')
    log = models.CharField(blank=True, max_length=250, db_column='LOG')
    statustime = models.DateTimeField(db_column='STATUSTIME')
    usetime = models.DateTimeField(db_column='USETIME')

    class Meta:
        db_table = u'redirect'
class RequestStat(models.Model):
    """Per-request statistics row (server, client, timing and load data).

    Column names in this legacy table are lower-case, unlike the rest of the
    schema.
    """
    id = models.IntegerField(primary_key=True, db_column='ID')
    server = models.CharField(max_length=40, db_column='server')
    remote = models.CharField(max_length=40, db_column='remote')
    # Queue timing.
    qtime = models.DateTimeField(db_column='qtime')
    qduration = models.DateTimeField(db_column='qduration')
    duration = models.IntegerField(db_column='duration')
    # Host load snapshot, stored as strings.
    load = models.CharField(max_length=40, db_column='load')
    mem = models.CharField(max_length=40, db_column='mem')
    # NOTE(review): attribute `urls` maps to singular column 'url' — the
    # mismatch is preserved deliberately; renaming would break callers.
    urls = models.CharField(max_length=40, db_column='url')
    description = models.CharField(max_length=12000, db_column='description')

    class Meta:
        db_table = u'request_stats'
class Savedpages(models.Model):
    """Cached pre-rendered HTML page, keyed by (name, flag, hours).

    The underlying table uses a composite key. Django does not support
    composite primary keys, and marking several fields ``primary_key=True``
    (as the original generated code did) fails Django's system check
    models.E026. We therefore keep NAME as the model-level primary key and
    enforce the composite uniqueness through ``Meta.unique_together``, which
    was already declared.
    """
    name = models.CharField(max_length=90, db_column='NAME', primary_key=True)
    flag = models.CharField(max_length=60, db_column='FLAG')
    hours = models.IntegerField(db_column='HOURS')
    # Rendered page payload.
    html = models.TextField(db_column='HTML')
    lastmod = models.DateTimeField(null=True, db_column='LASTMOD', blank=True)
    # Refresh interval; nullable in the legacy schema.
    interval = models.IntegerField(null=True, db_column='INTERVAL', blank=True)

    class Meta:
        db_table = u'savedpages'
        unique_together = ('name', 'flag', 'hours')
class Servicelist(models.Model):
    """Monitored service registry entry: identity, probe config and status."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=180, db_column='NAME')
    # Where the service runs.
    host = models.CharField(blank=True, max_length=300, db_column='HOST')
    pid = models.IntegerField(blank=True, null=True, db_column='PID')
    userid = models.CharField(blank=True, max_length=120, db_column='USERID')
    # NOTE(review): `type` shadows the builtin; kept for interface compatibility.
    type = models.CharField(blank=True, max_length=90, db_column='TYPE')
    grp = models.CharField(blank=True, max_length=60, db_column='GRP')
    description = models.CharField(blank=True, max_length=600, db_column='DESCRIPTION')
    # Probe endpoints and latest result.
    url = models.CharField(blank=True, max_length=600, db_column='URL')
    testurl = models.CharField(blank=True, max_length=600, db_column='TESTURL')
    response = models.CharField(blank=True, max_length=600, db_column='RESPONSE')
    tresponse = models.IntegerField(blank=True, null=True, db_column='TRESPONSE')
    # Lifecycle timestamps.
    tstart = models.DateTimeField(db_column='TSTART')
    tstop = models.DateTimeField(db_column='TSTOP')
    tcheck = models.DateTimeField(db_column='TCHECK')
    cyclesec = models.IntegerField(blank=True, null=True, db_column='CYCLESEC')
    status = models.CharField(max_length=60, db_column='STATUS')
    lastmod = models.DateTimeField(db_column='LASTMOD')
    # Operator-facing configuration and messages.
    config = models.CharField(blank=True, max_length=600, db_column='CONFIG')
    message = models.CharField(blank=True, max_length=12000, db_column='MESSAGE')
    restartcmd = models.CharField(blank=True, max_length=12000, db_column='RESTARTCMD')
    doaction = models.CharField(blank=True, max_length=12000, db_column='DOACTION')

    class Meta:
        db_table = u'servicelist'
class Siteaccess(models.Model):
    """ORM mapping for the ``siteaccess`` table: per-user (DN) access rights
    and working-group membership for a site."""
    id = models.BigIntegerField(primary_key=True, db_column='ID')
    dn = models.CharField(max_length=300, db_column='DN', blank=True)
    pandasite = models.CharField(max_length=300, db_column='PANDASITE', blank=True)
    poffset = models.BigIntegerField(db_column='POFFSET')
    rights = models.CharField(max_length=90, db_column='RIGHTS', blank=True)
    status = models.CharField(max_length=60, db_column='STATUS', blank=True)
    workinggroups = models.CharField(max_length=300, db_column='WORKINGGROUPS', blank=True)
    created = models.DateTimeField(null=True, db_column='CREATED', blank=True)

    class Meta:
        db_table = u'siteaccess'
class Sitedata(models.Model):
    """ORM mapping for the ``sitedata`` table: aggregated per-site job counters
    (finished/failed/running/...) over a time window, keyed by (site, flag, hours)."""
    # NOTE(review): several fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
    flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
    hours = models.IntegerField(db_column='HOURS', primary_key=True)
    nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
    memmin = models.IntegerField(null=True, db_column='MEMMIN', blank=True)
    memmax = models.IntegerField(null=True, db_column='MEMMAX', blank=True)
    si2000min = models.IntegerField(null=True, db_column='SI2000MIN', blank=True)
    si2000max = models.IntegerField(null=True, db_column='SI2000MAX', blank=True)
    os = models.CharField(max_length=90, db_column='OS', blank=True)
    space = models.CharField(max_length=90, db_column='SPACE', blank=True)
    minjobs = models.IntegerField(null=True, db_column='MINJOBS', blank=True)
    maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
    laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
    lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
    lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
    lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
    lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
    nstart = models.IntegerField(db_column='NSTART')
    finished = models.IntegerField(db_column='FINISHED')
    failed = models.IntegerField(db_column='FAILED')
    defined = models.IntegerField(db_column='DEFINED')
    assigned = models.IntegerField(db_column='ASSIGNED')
    waiting = models.IntegerField(db_column='WAITING')
    activated = models.IntegerField(db_column='ACTIVATED')
    holding = models.IntegerField(db_column='HOLDING')
    running = models.IntegerField(db_column='RUNNING')
    transferring = models.IntegerField(db_column='TRANSFERRING')
    getjob = models.IntegerField(db_column='GETJOB')
    updatejob = models.IntegerField(db_column='UPDATEJOB')
    lastmod = models.DateTimeField(db_column='LASTMOD')
    ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
    nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)

    class Meta:
        db_table = u'sitedata'
        unique_together = ('site', 'flag', 'hours')
class Siteddm(models.Model):
    """ORM mapping for the ``siteddm`` table: per-site data-transfer (DDM)
    stage-in/stage-out commands, options and paths."""
    name = models.CharField(max_length=180, primary_key=True, db_column='NAME')
    incmd = models.CharField(max_length=180, db_column='INCMD')
    inpath = models.CharField(max_length=600, db_column='INPATH', blank=True)
    inopts = models.CharField(max_length=180, db_column='INOPTS', blank=True)
    outcmd = models.CharField(max_length=180, db_column='OUTCMD')
    outopts = models.CharField(max_length=180, db_column='OUTOPTS', blank=True)
    outpath = models.CharField(max_length=600, db_column='OUTPATH')

    class Meta:
        db_table = u'siteddm'
class Sitehistory(models.Model):
    """ORM mapping for the ``sitehistory`` table: time-stamped snapshots of the
    per-site job counters, keyed by (site, flag, time, hours)."""
    # NOTE(review): several fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
    flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
    time = models.DateTimeField(db_column='TIME', primary_key=True)
    hours = models.IntegerField(db_column='HOURS', primary_key=True)
    nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
    memmin = models.IntegerField(null=True, db_column='MEMMIN', blank=True)
    memmax = models.IntegerField(null=True, db_column='MEMMAX', blank=True)
    si2000min = models.IntegerField(null=True, db_column='SI2000MIN', blank=True)
    si2000max = models.IntegerField(null=True, db_column='SI2000MAX', blank=True)
    si2000a = models.IntegerField(null=True, db_column='SI2000A', blank=True)
    si2000p = models.IntegerField(null=True, db_column='SI2000P', blank=True)
    walla = models.IntegerField(null=True, db_column='WALLA', blank=True)
    wallp = models.IntegerField(null=True, db_column='WALLP', blank=True)
    os = models.CharField(max_length=90, db_column='OS')
    space = models.CharField(max_length=90, db_column='SPACE')
    minjobs = models.IntegerField(null=True, db_column='MINJOBS', blank=True)
    maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
    laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
    lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
    lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
    lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
    lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
    nstart = models.IntegerField(db_column='NSTART')
    finished = models.IntegerField(db_column='FINISHED')
    failed = models.IntegerField(db_column='FAILED')
    defined = models.IntegerField(db_column='DEFINED')
    assigned = models.IntegerField(db_column='ASSIGNED')
    waiting = models.IntegerField(db_column='WAITING')
    activated = models.IntegerField(db_column='ACTIVATED')
    running = models.IntegerField(db_column='RUNNING')
    getjob = models.IntegerField(db_column='GETJOB')
    updatejob = models.IntegerField(db_column='UPDATEJOB')
    subtot = models.IntegerField(db_column='SUBTOT')
    subdef = models.IntegerField(db_column='SUBDEF')
    subdone = models.IntegerField(db_column='SUBDONE')
    filemods = models.IntegerField(db_column='FILEMODS')
    ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
    nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)

    class Meta:
        db_table = u'sitehistory'
        unique_together = ('site', 'time', 'flag', 'hours')
class Sitesinfo(models.Model):
    """ORM mapping for the ``sitesinfo`` table: static site metadata (contacts,
    monitoring endpoints, dataset/file counts)."""
    name = models.CharField(db_column='NAME', primary_key=True, max_length=120)
    nick = models.CharField(db_column='NICK', max_length=20)
    contact = models.CharField(db_column='CONTACT', max_length=30, blank=True)
    email = models.CharField(db_column='EMAIL', max_length=30, blank=True)
    status = models.CharField(db_column='STATUS', max_length=12, blank=True)
    lrc = models.CharField(db_column='LRC', max_length=120, blank=True)
    gridcat = models.IntegerField(db_column='GRIDCAT', blank=True, null=True)
    monalisa = models.CharField(db_column='MONALISA', max_length=20, blank=True)
    computingsite = models.CharField(db_column='COMPUTINGSITE', max_length=20, blank=True)
    mainsite = models.CharField(db_column='MAINSITE', max_length=20, blank=True)
    home = models.CharField(db_column='HOME', max_length=120, blank=True)
    ganglia = models.CharField(db_column='GANGLIA', max_length=120, blank=True)
    goc = models.CharField(db_column='GOC', max_length=20, blank=True)
    gocconfig = models.IntegerField(db_column='GOCCONFIG', blank=True, null=True)
    prodsys = models.CharField(db_column='PRODSYS', max_length=20, blank=True)
    dq2svc = models.CharField(db_column='DQ2SVC', max_length=20, blank=True)
    usage = models.CharField(db_column='USAGE', max_length=40, blank=True)
    updtime = models.IntegerField(db_column='UPDTIME', blank=True, null=True)
    ndatasets = models.IntegerField(db_column='NDATASETS', blank=True, null=True)
    nfiles = models.IntegerField(db_column='NFILES', blank=True, null=True)
    # NOTE(review): stored as an integer, presumably a Unix epoch — confirm.
    timestamp = models.IntegerField(db_column='TIMESTAMP', blank=True, null=True)

    class Meta:
        db_table = u'sitesinfo'
class Sitestats(models.Model):
    """ORM mapping for the ``sitestats`` table: aggregated job statistics per
    cloud/site (timings, success/failure counts, efficiency)."""
    cloud = models.CharField(max_length=30, primary_key=True, db_column='CLOUD')
    site = models.CharField(max_length=180, db_column='SITE', blank=True)
    at_time = models.DateTimeField(null=True, db_column='AT_TIME', blank=True)
    twidth = models.IntegerField(null=True, db_column='TWIDTH', blank=True)
    tjob = models.IntegerField(null=True, db_column='TJOB', blank=True)
    tgetjob = models.IntegerField(null=True, db_column='TGETJOB', blank=True)
    tstagein = models.IntegerField(null=True, db_column='TSTAGEIN', blank=True)
    trun = models.IntegerField(null=True, db_column='TRUN', blank=True)
    tstageout = models.IntegerField(null=True, db_column='TSTAGEOUT', blank=True)
    twait = models.IntegerField(null=True, db_column='TWAIT', blank=True)
    nusers = models.IntegerField(null=True, db_column='NUSERS', blank=True)
    nwn = models.IntegerField(null=True, db_column='NWN', blank=True)
    njobs = models.IntegerField(null=True, db_column='NJOBS', blank=True)
    nfinished = models.IntegerField(null=True, db_column='NFINISHED', blank=True)
    nfailed = models.IntegerField(null=True, db_column='NFAILED', blank=True)
    nfailapp = models.IntegerField(null=True, db_column='NFAILAPP', blank=True)
    nfailsys = models.IntegerField(null=True, db_column='NFAILSYS', blank=True)
    nfaildat = models.IntegerField(null=True, db_column='NFAILDAT', blank=True)
    ntimeout = models.IntegerField(null=True, db_column='NTIMEOUT', blank=True)
    efficiency = models.IntegerField(null=True, db_column='EFFICIENCY', blank=True)
    siteutil = models.IntegerField(null=True, db_column='SITEUTIL', blank=True)
    jobtype = models.CharField(max_length=90, db_column='JOBTYPE', blank=True)
    proctype = models.CharField(max_length=270, db_column='PROCTYPE', blank=True)
    username = models.CharField(max_length=270, db_column='USERNAME', blank=True)
    ngetjob = models.IntegerField(null=True, db_column='NGETJOB', blank=True)
    nupdatejob = models.IntegerField(null=True, db_column='NUPDATEJOB', blank=True)
    release = models.CharField(max_length=270, db_column='RELEASE', blank=True)
    nevents = models.BigIntegerField(null=True, db_column='NEVENTS', blank=True)
    spectype = models.CharField(max_length=270, db_column='SPECTYPE', blank=True)
    tsetup = models.IntegerField(null=True, db_column='TSETUP', blank=True)

    class Meta:
        db_table = u'sitestats'
class Submithosts(models.Model):
    """ORM mapping for the ``submithosts`` table: hosts from which pilots/jobs
    are submitted, with their run directories and URLs."""
    name = models.CharField(max_length=180, db_column='NAME')
    nickname = models.CharField(max_length=60, db_column='NICKNAME')
    host = models.CharField(max_length=180, primary_key=True, db_column='HOST')
    system = models.CharField(max_length=180, db_column='SYSTEM')
    rundir = models.CharField(max_length=600, db_column='RUNDIR')
    runurl = models.CharField(max_length=600, db_column='RUNURL')
    jdltxt = models.CharField(max_length=12000, db_column='JDLTXT', blank=True)
    pilotqueue = models.CharField(max_length=60, db_column='PILOTQUEUE', blank=True)
    outurl = models.CharField(max_length=600, db_column='OUTURL', blank=True)

    class Meta:
        db_table = u'submithosts'
class Sysconfig(models.Model):
    """ORM mapping for the ``sysconfig`` table: free-form configuration blobs
    keyed by (name, system)."""
    # NOTE(review): two fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
    system = models.CharField(max_length=60, db_column='SYSTEM', primary_key=True)
    config = models.CharField(max_length=12000, db_column='CONFIG', blank=True)

    class Meta:
        db_table = u'sysconfig'
        unique_together = ('name', 'system')
class TM4RegionsReplication(models.Model):
    """ORM mapping for the ``t_m4regions_replication`` table: Tier-2 dataset
    replication shares and subscription options per cloud."""
    tier2 = models.CharField(max_length=150, primary_key=True, db_column='TIER2')
    cloud = models.CharField(max_length=90, db_column='CLOUD')
    percentage = models.FloatField(null=True, db_column='PERCENTAGE', blank=True)
    tier1 = models.CharField(max_length=150, db_column='TIER1')
    nsubs = models.IntegerField(null=True, db_column='NSUBS', blank=True)
    subsoption = models.CharField(max_length=960, db_column='SUBSOPTION', blank=True)
    status = models.CharField(max_length=36, db_column='STATUS', blank=True)
    timestamp = models.IntegerField(null=True, db_column='TIMESTAMP', blank=True)
    stream_pattern = models.CharField(max_length=96, db_column='STREAM_PATTERN', blank=True)
    nreplicas = models.IntegerField(null=True, db_column='NREPLICAS', blank=True)
    nsubs_aod = models.IntegerField(null=True, db_column='NSUBS_AOD', blank=True)
    nsubs_dpd = models.IntegerField(null=True, db_column='NSUBS_DPD', blank=True)
    upd_flag = models.CharField(max_length=12, db_column='UPD_FLAG', blank=True)
    esd = models.IntegerField(null=True, db_column='ESD', blank=True)
    esd_subsoption = models.CharField(max_length=960, db_column='ESD_SUBSOPTION', blank=True)
    desd = models.IntegerField(null=True, db_column='DESD', blank=True)
    desd_subsoption = models.CharField(max_length=960, db_column='DESD_SUBSOPTION', blank=True)
    prim_flag = models.IntegerField(null=True, db_column='PRIM_FLAG', blank=True)
    t2group = models.BigIntegerField(null=True, db_column='T2GROUP', blank=True)

    class Meta:
        db_table = u't_m4regions_replication'
class TTier2Groups(models.Model):
    """ORM mapping for the ``t_tier2_groups`` table: Tier-2 group ids and shares."""
    name = models.CharField(max_length=36, primary_key=True, db_column='NAME')
    gid = models.BigIntegerField(null=True, db_column='GID', blank=True)
    ntup_share = models.BigIntegerField(null=True, db_column='NTUP_SHARE', blank=True)
    # Column name is misspelled "TIMESTMAP" in the schema; kept as-is on purpose.
    timestmap = models.BigIntegerField(null=True, db_column='TIMESTMAP', blank=True)

    class Meta:
        db_table = u't_tier2_groups'
class Tablepart4Copying(models.Model):
    """ORM mapping for the ``tablepart4copying`` table: bookkeeping of table
    partitions copied to archive, verified and deleted."""
    # NOTE(review): two fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    table_name = models.CharField(max_length=90, db_column='TABLE_NAME', primary_key=True)
    partition_name = models.CharField(max_length=90, db_column='PARTITION_NAME', primary_key=True)
    copied_to_arch = models.CharField(max_length=30, db_column='COPIED_TO_ARCH')
    copying_done_on = models.DateTimeField(null=True, db_column='COPYING_DONE_ON', blank=True)
    deleted_on = models.DateTimeField(null=True, db_column='DELETED_ON', blank=True)
    data_verif_passed = models.CharField(max_length=9, db_column='DATA_VERIF_PASSED', blank=True)
    data_verified_on = models.DateTimeField(null=True, db_column='DATA_VERIFIED_ON', blank=True)

    class Meta:
        db_table = u'tablepart4copying'
        unique_together = ('table_name', 'partition_name')
class Taginfo(models.Model):
    """ORM mapping for the ``taginfo`` table: tag descriptions and the queues
    they apply to."""
    tag = models.CharField(max_length=90, primary_key=True, db_column='TAG')
    description = models.CharField(max_length=300, db_column='DESCRIPTION')
    nqueues = models.IntegerField(db_column='NQUEUES')
    queues = models.CharField(max_length=12000, db_column='QUEUES', blank=True)

    class Meta:
        db_table = u'taginfo'
class Tags(models.Model):
    """ORM mapping for the ``tags`` table: user/group tags attached to items."""
    id = models.IntegerField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=60, db_column='NAME')
    description = models.CharField(max_length=180, db_column='DESCRIPTION')
    ugid = models.IntegerField(null=True, db_column='UGID', blank=True)
    type = models.CharField(max_length=30, db_column='TYPE')
    itemid = models.IntegerField(null=True, db_column='ITEMID', blank=True)
    created = models.DateTimeField(db_column='CREATED')

    class Meta:
        db_table = u'tags'
class Transfercosts(models.Model):
    """ORM mapping for the ``transfercosts`` table: current transfer cost
    between a source and destination site."""
    # NOTE(review): no field declares primary_key=True, so Django will add an
    # implicit ``id`` AutoField — verify the ``transfercosts`` table actually
    # has such a column, or mark the appropriate field as the primary key.
    sourcesite = models.CharField(db_column='SOURCESITE', max_length=256)
    destsite = models.CharField(db_column='DESTSITE', max_length=256)
    type = models.CharField(db_column='TYPE', max_length=256)
    status = models.CharField(db_column='STATUS', max_length=64, blank=True)
    last_update = models.DateTimeField(db_column='LAST_UPDATE', blank=True, null=True)
    cost = models.BigIntegerField(db_column='COST')
    max_cost = models.BigIntegerField(db_column='MAX_COST', blank=True, null=True)
    min_cost = models.BigIntegerField(db_column='MIN_COST', blank=True, null=True)

    class Meta:
        db_table = u'transfercosts'
class TransfercostsHistory(models.Model):
    """ORM mapping for the ``transfercosts_history`` table: historical transfer
    costs between sites."""
    sourcesite = models.CharField(db_column='SOURCESITE', primary_key=True, max_length=255)
    destsite = models.CharField(max_length=768, db_column='DESTSITE')
    type = models.CharField(max_length=768, db_column='TYPE', blank=True)
    status = models.CharField(max_length=192, db_column='STATUS', blank=True)
    last_update = models.DateTimeField(null=True, db_column='LAST_UPDATE', blank=True)
    cost = models.BigIntegerField(db_column='COST')
    max_cost = models.BigIntegerField(null=True, db_column='MAX_COST', blank=True)
    min_cost = models.BigIntegerField(null=True, db_column='MIN_COST', blank=True)

    class Meta:
        db_table = u'transfercosts_history'
class TriggersDebug(models.Model):
    """ORM mapping for the ``triggers_debug`` table: timestamped debug records
    written by database triggers."""
    when = models.DateTimeField(primary_key=True, db_column='WHEN')
    what = models.CharField(max_length=300, db_column='WHAT', blank=True)
    value = models.CharField(max_length=600, db_column='VALUE', blank=True)

    class Meta:
        db_table = u'triggers_debug'
class Usagereport(models.Model):
    """ORM mapping for the ``usagereport`` table: per-site usage report entries
    over a time window."""
    entry = models.IntegerField(primary_key=True, db_column='ENTRY')
    flag = models.CharField(max_length=60, db_column='FLAG')
    hours = models.IntegerField(null=True, db_column='HOURS', blank=True)
    tstart = models.DateTimeField(null=True, db_column='TSTART', blank=True)
    tend = models.DateTimeField(null=True, db_column='TEND', blank=True)
    tinsert = models.DateTimeField(db_column='TINSERT')
    site = models.CharField(max_length=90, db_column='SITE')
    nwn = models.IntegerField(null=True, db_column='NWN', blank=True)

    class Meta:
        db_table = u'usagereport'
class Usercacheusage(models.Model):
    """ORM mapping for the ``usercacheusage`` table: files stored in a per-user
    cache, keyed by (filename, hostname, creationtime)."""
    # NOTE(review): several fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    username = models.CharField(max_length=384, db_column='USERNAME')
    filename = models.CharField(db_column='FILENAME', max_length=255, primary_key=True)
    hostname = models.CharField(max_length=192, db_column='HOSTNAME', primary_key=True)
    creationtime = models.DateTimeField(db_column='CREATIONTIME', primary_key=True)
    modificationtime = models.DateTimeField(null=True, db_column='MODIFICATIONTIME', blank=True)
    filesize = models.BigIntegerField(null=True, db_column='FILESIZE', blank=True)
    checksum = models.CharField(max_length=108, db_column='CHECKSUM', blank=True)
    aliasname = models.CharField(max_length=768, db_column='ALIASNAME', blank=True)

    class Meta:
        db_table = u'usercacheusage'
        unique_together = ('filename', 'hostname', 'creationtime')
class Users(models.Model):
    """ORM mapping for the ``users`` table: registered users with their job and
    CPU-usage counters over 1/7/30-day windows and cached page state.

    The ``allColumns``/``orderColumns``/... class attributes feed the list-view
    machinery; the COLUMNS/ORDER_COLUMNS/COL_TITLES/FILTERS dicts are defined
    elsewhere in this module (not visible in this chunk).
    """
    id = models.IntegerField(primary_key=True, db_column='ID')
    name = models.CharField(max_length=180, db_column='NAME')
    dn = models.CharField(max_length=450, db_column='DN', blank=True)
    email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
    url = models.CharField(max_length=300, db_column='URL', blank=True)
    location = models.CharField(max_length=180, db_column='LOCATION', blank=True)
    classa = models.CharField(max_length=90, db_column='CLASSA', blank=True)
    classp = models.CharField(max_length=90, db_column='CLASSP', blank=True)
    classxp = models.CharField(max_length=90, db_column='CLASSXP', blank=True)
    sitepref = models.CharField(max_length=180, db_column='SITEPREF', blank=True)
    gridpref = models.CharField(max_length=60, db_column='GRIDPREF', blank=True)
    queuepref = models.CharField(max_length=180, db_column='QUEUEPREF', blank=True)
    scriptcache = models.CharField(max_length=300, db_column='SCRIPTCACHE', blank=True)
    types = models.CharField(max_length=180, db_column='TYPES', blank=True)
    sites = models.CharField(max_length=750, db_column='SITES', blank=True)
    njobsa = models.IntegerField(null=True, db_column='NJOBSA', blank=True)
    njobsp = models.IntegerField(null=True, db_column='NJOBSP', blank=True)
    njobs1 = models.IntegerField(null=True, db_column='NJOBS1', blank=True)
    njobs7 = models.IntegerField(null=True, db_column='NJOBS7', blank=True)
    njobs30 = models.IntegerField(null=True, db_column='NJOBS30', blank=True)
    cpua1 = models.BigIntegerField(null=True, db_column='CPUA1', blank=True)
    cpua7 = models.BigIntegerField(null=True, db_column='CPUA7', blank=True)
    cpua30 = models.BigIntegerField(null=True, db_column='CPUA30', blank=True)
    cpup1 = models.BigIntegerField(null=True, db_column='CPUP1', blank=True)
    cpup7 = models.BigIntegerField(null=True, db_column='CPUP7', blank=True)
    cpup30 = models.BigIntegerField(null=True, db_column='CPUP30', blank=True)
    cpuxp1 = models.BigIntegerField(null=True, db_column='CPUXP1', blank=True)
    cpuxp7 = models.BigIntegerField(null=True, db_column='CPUXP7', blank=True)
    cpuxp30 = models.BigIntegerField(null=True, db_column='CPUXP30', blank=True)
    quotaa1 = models.BigIntegerField(null=True, db_column='QUOTAA1', blank=True)
    quotaa7 = models.BigIntegerField(null=True, db_column='QUOTAA7', blank=True)
    quotaa30 = models.BigIntegerField(null=True, db_column='QUOTAA30', blank=True)
    quotap1 = models.BigIntegerField(null=True, db_column='QUOTAP1', blank=True)
    quotap7 = models.BigIntegerField(null=True, db_column='QUOTAP7', blank=True)
    quotap30 = models.BigIntegerField(null=True, db_column='QUOTAP30', blank=True)
    quotaxp1 = models.BigIntegerField(null=True, db_column='QUOTAXP1', blank=True)
    quotaxp7 = models.BigIntegerField(null=True, db_column='QUOTAXP7', blank=True)
    quotaxp30 = models.BigIntegerField(null=True, db_column='QUOTAXP30', blank=True)
    space1 = models.IntegerField(null=True, db_column='SPACE1', blank=True)
    space7 = models.IntegerField(null=True, db_column='SPACE7', blank=True)
    space30 = models.IntegerField(null=True, db_column='SPACE30', blank=True)
    lastmod = models.DateTimeField(db_column='LASTMOD')
    firstjob = models.DateTimeField(db_column='FIRSTJOB')
    latestjob = models.DateTimeField(db_column='LATESTJOB')
    pagecache = models.TextField(db_column='PAGECACHE', blank=True)
    cachetime = models.DateTimeField(db_column='CACHETIME')
    ncurrent = models.IntegerField(db_column='NCURRENT')
    jobid = models.IntegerField(db_column='JOBID')
    status = models.CharField(max_length=60, db_column='STATUS', blank=True)
    vo = models.CharField(max_length=60, db_column='VO', blank=True)

    class Meta:
        db_table = u'users'
        ##FIXME: reenable this after proper dbproxies are introduced!### db_table = u'"ATLAS_PANDAMETA"."USERS"'

    allColumns = COLUMNS['ActiveUsers-all']
    primaryColumns = [ 'name']
    secondaryColumns = []
    orderColumns = ORDER_COLUMNS['ActiveUsers-all']
    columnTitles = COL_TITLES['ActiveUsers-all']
    filterFields = FILTERS['ActiveUsers-all']

    def __str__(self):
        # Human-readable label, e.g. "User: alice[ACTIVE]".
        return 'User: ' + str(self.name) + '[' + str(self.status) + ']'
class Userstats(models.Model):
    """ORM mapping for the ``userstats`` table: per-user monthly job counts,
    keyed by (name, yr, mo)."""
    # NOTE(review): several fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    name = models.CharField(max_length=180, db_column='NAME', primary_key=True)
    label = models.CharField(max_length=60, db_column='LABEL', blank=True)
    yr = models.IntegerField(db_column='YR', primary_key=True)
    mo = models.IntegerField(db_column='MO', primary_key=True)
    jobs = models.BigIntegerField(null=True, db_column='JOBS', blank=True)
    idlo = models.BigIntegerField(null=True, db_column='IDLO', blank=True)
    idhi = models.BigIntegerField(null=True, db_column='IDHI', blank=True)
    info = models.CharField(max_length=300, db_column='INFO', blank=True)

    class Meta:
        db_table = u'userstats'
        unique_together = ('name', 'yr', 'mo')
class Usersubs(models.Model):
    """ORM mapping for the ``usersubs`` table: user dataset subscriptions per
    site, keyed by (datasetname, site)."""
    # NOTE(review): two fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    datasetname = models.CharField(max_length=255, db_column='DATASETNAME', primary_key=True)
    site = models.CharField(max_length=192, db_column='SITE', primary_key=True)
    creationdate = models.DateTimeField(null=True, db_column='CREATIONDATE', blank=True)
    modificationdate = models.DateTimeField(null=True, db_column='MODIFICATIONDATE', blank=True)
    nused = models.IntegerField(null=True, db_column='NUSED', blank=True)
    state = models.CharField(max_length=90, db_column='STATE', blank=True)

    class Meta:
        db_table = u'usersubs'
        unique_together = ('datasetname', 'site')
class VoToSite(models.Model):
    """ORM mapping for the ``vo_to_site`` table: (site, queue, VO) associations."""
    # NOTE(review): all three fields declare primary_key=True, but Django does
    # not support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    site_name = models.CharField(max_length=96, db_column='SITE_NAME', primary_key=True)
    queue = models.CharField(max_length=192, db_column='QUEUE', primary_key=True)
    vo_name = models.CharField(max_length=96, db_column='VO_NAME', primary_key=True)

    class Meta:
        db_table = u'vo_to_site'
        unique_together = ('site_name', 'queue', 'vo_name')
class Vorspassfail(models.Model):
    """ORM mapping for the ``vorspassfail`` table: latest VO resource-selection
    pass/fail status per site."""
    site_name = models.CharField(max_length=96, primary_key=True, db_column='SITE_NAME')
    passfail = models.CharField(max_length=12, db_column='PASSFAIL')
    last_checked = models.DateTimeField(null=True, db_column='LAST_CHECKED', blank=True)

    class Meta:
        db_table = u'vorspassfail'
class Wndata(models.Model):
    """ORM mapping for the ``wndata`` table: per-worker-node job counters over a
    time window, keyed by (site, wn, flag, hours)."""
    # NOTE(review): several fields declare primary_key=True, but Django does not
    # support composite primary keys — only one takes effect; uniqueness is
    # modelled via Meta.unique_together. Confirm against the real schema.
    site = models.CharField(max_length=90, db_column='SITE', primary_key=True)
    wn = models.CharField(max_length=150, db_column='WN', primary_key=True)
    flag = models.CharField(max_length=60, db_column='FLAG', primary_key=True)
    hours = models.IntegerField(db_column='HOURS', primary_key=True)
    mem = models.IntegerField(null=True, db_column='MEM', blank=True)
    si2000 = models.IntegerField(null=True, db_column='SI2000', blank=True)
    os = models.CharField(max_length=90, db_column='OS', blank=True)
    space = models.CharField(max_length=90, db_column='SPACE', blank=True)
    maxjobs = models.IntegerField(null=True, db_column='MAXJOBS', blank=True)
    laststart = models.DateTimeField(null=True, db_column='LASTSTART', blank=True)
    lastend = models.DateTimeField(null=True, db_column='LASTEND', blank=True)
    lastfail = models.DateTimeField(null=True, db_column='LASTFAIL', blank=True)
    lastpilot = models.DateTimeField(null=True, db_column='LASTPILOT', blank=True)
    lastpid = models.IntegerField(null=True, db_column='LASTPID', blank=True)
    nstart = models.IntegerField(db_column='NSTART')
    finished = models.IntegerField(db_column='FINISHED')
    failed = models.IntegerField(db_column='FAILED')
    holding = models.IntegerField(db_column='HOLDING')
    running = models.IntegerField(db_column='RUNNING')
    transferring = models.IntegerField(db_column='TRANSFERRING')
    getjob = models.IntegerField(db_column='GETJOB')
    updatejob = models.IntegerField(db_column='UPDATEJOB')
    lastmod = models.DateTimeField(db_column='LASTMOD')
    ncpu = models.IntegerField(null=True, db_column='NCPU', blank=True)
    ncpucurrent = models.IntegerField(null=True, db_column='NCPUCURRENT', blank=True)
    nslot = models.IntegerField(null=True, db_column='NSLOT', blank=True)
    nslotcurrent = models.IntegerField(null=True, db_column='NSLOTCURRENT', blank=True)

    class Meta:
        db_table = u'wndata'
        unique_together = ('site', 'wn', 'flag', 'hours')
| apache-2.0 |
danielfrg/datasciencebox | datasciencebox/salt/_states/conda.py | 1 | 5256 | import os
# Name under which this state module is exposed to Salt states.
__virtualname__ = 'conda'
def __virtual__():
    """
    Expose this state module as ``conda`` only when the ``pip`` execution
    module has been loaded into ``__salt__``.
    """
    return __virtualname__ if 'pip.list' in __salt__ else False
def managed(name, packages=None, requirements=None, saltenv='base', user=None):
    """
    Create a conda environment and install python requirements into it.

    pip is installed by default in the new environment.

    name
        path of the environment to be created
    packages : None
        single package or comma-separated list of packages to install,
        e.g. ``numpy, scipy=0.13.3, pandas``
    requirements : None
        path to a ``requirements.txt`` file in the ``pip freeze`` format
    saltenv : 'base'
        Salt environment; useful when ``requirements`` is a file on the Salt
        file system (e.g. ``salt://.../reqs.txt``)
    user
        the user under which to run the commands

    Returns the standard Salt state dict (``name``/``result``/``comment``/
    ``changes``).
    """
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
    comments = []

    # Create the environment itself; bail out early if that fails.
    try:
        installation_comment = __salt__['conda.create'](name, user=user)
        if installation_comment.endswith('created'):
            comments.append('Virtual enviroment "%s" created' % name)
        else:
            comments.append('Virtual enviroment "%s" already exists' % name)
    except Exception as e:
        # Bug fix: store a string, not the exception object, so the state
        # output serializes/renders cleanly.
        ret['comment'] = str(e)
        ret['result'] = False
        return ret

    # Install packages given as an explicit list.
    if packages is not None:
        installation_ret = installed(packages, env=name, saltenv=saltenv, user=user)
        ret['result'] = ret['result'] and installation_ret['result']
        comments.append('From list [%s]' % installation_ret['comment'])
        ret['changes'].update(installation_ret['changes'])

    # Install packages listed in a requirements file.
    if requirements is not None:
        installation_ret = installed(requirements, env=name, saltenv=saltenv, user=user)
        ret['result'] = ret['result'] and installation_ret['result']
        comments.append('From file [%s]' % installation_ret['comment'])
        ret['changes'].update(installation_ret['changes'])

    ret['comment'] = '. '.join(comments)
    return ret
def installed(name, env=None, saltenv='base', user=None):
    """
    Install a single package, a comma-separated list of packages, or the
    packages listed in a requirements.txt file.

    Packages already present in the environment are skipped; the check is done
    here so ``conda list`` and ``pip freeze`` only need to run once.

    name
        name of the package(s) or path to the requirements.txt
    env : None
        environment name or path; if None (default) the default conda
        environment (``~/anaconda/bin``) is used
    saltenv : 'base'
        Salt environment; useful when ``name`` is a file on the Salt file
        system (e.g. ``salt://.../reqs.txt``)
    user
        the user under which to run the commands
    """
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}

    # Build the list of requested packages, either from a requirements file
    # (local path or salt:// URL) or from a comma-separated string.
    packages = []
    if os.path.exists(name) or name.startswith('salt://'):
        if name.startswith('salt://'):
            lines = __salt__['cp.get_file_str'](name, saltenv)
            lines = lines.split('\n')
        else:
            # Bug fix: use a context manager so the file is closed even if
            # reading raises (the old code leaked the handle on error).
            with open(name, mode='r') as f:
                lines = f.readlines()
        for line in lines:
            line = line.strip()
            if line != '' and not line.startswith('#'):
                line = line.split('#')[0].strip()  # Remove inline comments
                packages.append(line)
    else:
        packages = [pkg.strip() for pkg in name.split(',')]

    conda_list = __salt__['conda.list'](env=env, user=user)

    def extract_info(package):
        """Split a spec like ``pkg``, ``pkg==1.0``, ``pkg>=1.0`` or ``pkg>1.0``
        into ``(name, version)``.

        Bug fix: the previous implementation shadowed its parameter and, for a
        bare ``>`` spec, indexed ``package.split('>=')[1]`` which raised
        IndexError.
        NOTE(review): conda-style single ``=`` specs (e.g. ``scipy=0.13.3``)
        are not split — this matches the previous behaviour; confirm intended.
        """
        for separator in ('==', '>=', '>'):
            if separator in package:
                pkgname, _, pkgversion = package.partition(separator)
                return pkgname, pkgversion
        return package, ''

    installed, failed, old = 0, 0, 0
    for package in packages:
        pkgname, pkgversion = extract_info(package)
        # ``conda list`` output pads the package name to 26 columns before the
        # version, so reproduce that layout for the membership test.
        conda_pkgname = pkgname + ' ' * (26 - len(pkgname)) + pkgversion
        if conda_pkgname not in conda_list:
            installation = __salt__['conda.install'](package, env=env, user=user)
            if installation['retcode'] == 0:
                ret['changes'][package] = 'installed'
                installed += 1
            else:
                ret['changes'][package] = installation
                failed += 1
        else:
            old += 1

    comments = []
    if installed > 0:
        comments.append('{0} installed'.format(installed))
    if failed > 0:
        ret['result'] = False
        comments.append('{0} failed'.format(failed))
    if old > 0:
        comments.append('{0} already installed'.format(old))
    ret['comment'] = ', '.join(comments)
    return ret
def execcmd(cmd, user=None):
    """Join *cmd* (a list of argument strings) into one command line and run it
    through Salt's ``cmd.run_all`` as *user*, returning its result dict."""
    command_line = ' '.join(cmd)
    return __salt__['cmd.run_all'](command_line, runas=user)
| apache-2.0 |
OpenGenus/cosmos | code/artificial_intelligence/src/artificial_neural_network/ann.py | 3 | 1384 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Artificial Neural Network binary classifier built with Keras.
# Straight-line script: load data -> encode categoricals -> split/scale ->
# train -> predict.

# Columns 3..12 form the feature matrix; column 13 is the binary target.
# NOTE(review): assumes a fixed dataset.csv layout — confirm against the file.
dataset = pd.read_csv("dataset.csv")
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

# Label-encode the two categorical feature columns (indices 1 and 2 of X).
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
# One-hot encode column 1.
# NOTE(review): `categorical_features` was deprecated in scikit-learn 0.20 and
# removed in 0.22 — this line only runs on older scikit-learn; on modern
# versions migrate to sklearn.compose.ColumnTransformer.
onehotencoder = OneHotEncoder(categorical_features=[1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]  # drop one dummy column to avoid the dummy-variable trap
from sklearn.model_selection import train_test_split

# Fixed random_state for a reproducible 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler

# Standardize features; fit on the training set only to avoid leakage.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
import keras
from keras.models import Sequential
from keras.layers import Dense

# Architecture: 11 inputs -> two hidden layers of 6 ReLU units -> 1 sigmoid output.
classifier = Sequential()
classifier.add(
    Dense(units=6, kernel_initializer="uniform", activation="relu", input_dim=11)
)
classifier.add(Dense(units=6, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid"))
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
classifier.fit(X_train, y_train, batch_size=10, epochs=100)

# Threshold predicted probabilities at 0.5 into hard boolean class labels.
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
| gpl-3.0 |
pbashivan/EEGLearn | eeglearn/eeg_cnn_lib.py | 1 | 24878 | from __future__ import print_function
import time
import numpy as np
np.random.seed(1234)
from functools import reduce
import math as m
import scipy.io
import theano
import theano.tensor as T
from scipy.interpolate import griddata
from sklearn.preprocessing import scale
from utils import augment_EEG, cart2sph, pol2cart
import lasagne
from lasagne.regularization import regularize_layer_params, regularize_network_params, l1, l2
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, InputLayer
from lasagne.layers import DenseLayer, ElemwiseMergeLayer, FlattenLayer
from lasagne.layers import ConcatLayer, ReshapeLayer, get_output_shape
from lasagne.layers import Conv1DLayer, DimshuffleLayer, LSTMLayer, SliceLayer
def azim_proj(pos):
    """
    Computes the Azimuthal Equidistant Projection of input point in 3D Cartesian Coordinates.

    Imagine a plane being placed against (tangent to) a globe. If
    a light source inside the globe projects the graticule onto
    the plane the result would be a planar, or azimuthal, map
    projection.

    :param pos: position in 3D Cartesian coordinates
    :return: projected coordinates using Azimuthal Equidistant Projection
    """
    x, y, z = pos[0], pos[1], pos[2]
    _, elevation, azimuth = cart2sph(x, y, z)
    # Co-latitude (angle from the pole) becomes the polar radius.
    polar_angle = m.pi / 2 - elevation
    return pol2cart(azimuth, polar_angle)
def gen_images(locs, features, n_gridpoints, normalize=True,
               augment=False, pca=False, std_mult=0.1, n_components=2, edgeless=False):
    """
    Generates EEG images given electrode locations in 2D space and multiple feature values for each electrode

    :param locs: An array with shape [n_electrodes, 2] containing X, Y
                        coordinates for each electrode.
    :param features: Feature matrix as [n_samples, n_features]
                                Features are as columns.
                                Features corresponding to each frequency band are concatenated.
                                (alpha1, alpha2, ..., beta1, beta2,...)
    :param n_gridpoints: Number of pixels in the output images
    :param normalize:   Flag for whether to normalize each band over all samples
    :param augment:     Flag for generating augmented images
    :param pca:         Flag for PCA based data augmentation
    :param std_mult     Multiplier for std of added noise
    :param n_components: Number of components in PCA to retain for augmentation
    :param edgeless:    If True generates edgeless images by adding artificial channels
                        at four corners of the image with value = 0 (default=False).
    :return:            Tensor of size [samples, colors, W, H] containing generated
                        images.
    """
    feat_array_temp = []
    nElectrodes = locs.shape[0]     # Number of electrodes
    # Test whether the feature vector length is divisible by number of electrodes
    assert features.shape[1] % nElectrodes == 0
    # Integer division: '/' yields a float under Python 3 and breaks range() below.
    n_colors = features.shape[1] // nElectrodes
    for c in range(n_colors):
        feat_array_temp.append(features[:, c * nElectrodes : nElectrodes * (c+1)])
    if augment:
        if pca:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=True, n_components=n_components)
        else:
            for c in range(n_colors):
                feat_array_temp[c] = augment_EEG(feat_array_temp[c], std_mult, pca=False, n_components=n_components)
    n_samples = features.shape[0]

    # Interpolate the values onto an n_gridpoints x n_gridpoints grid spanning
    # the bounding box of the electrode locations.
    grid_x, grid_y = np.mgrid[
        min(locs[:, 0]):max(locs[:, 0]):n_gridpoints*1j,
        min(locs[:, 1]):max(locs[:, 1]):n_gridpoints*1j
    ]
    temp_interp = []
    for c in range(n_colors):
        temp_interp.append(np.zeros([n_samples, n_gridpoints, n_gridpoints]))

    # Generate edgeless images: append the four bounding-box corners as
    # artificial zero-valued channels so interpolation covers the whole image.
    if edgeless:
        min_x, min_y = np.min(locs, axis=0)
        max_x, max_y = np.max(locs, axis=0)
        locs = np.append(locs, np.array([[min_x, min_y], [min_x, max_y], [max_x, min_y], [max_x, max_y]]), axis=0)
        for c in range(n_colors):
            feat_array_temp[c] = np.append(feat_array_temp[c], np.zeros((n_samples, 4)), axis=1)

    # Interpolating ('xrange' replaced by 'range' for Python 3 compatibility).
    for i in range(n_samples):
        for c in range(n_colors):
            temp_interp[c][i, :, :] = griddata(locs, feat_array_temp[c][i, :], (grid_x, grid_y),
                                               method='cubic', fill_value=np.nan)
        print('Interpolating {0}/{1}\r'.format(i + 1, n_samples), end='\r')

    # Normalizing each color band over all samples; NaNs (points outside the
    # convex hull of the electrodes) are zeroed afterwards either way.
    for c in range(n_colors):
        if normalize:
            temp_interp[c][~np.isnan(temp_interp[c])] = \
                scale(temp_interp[c][~np.isnan(temp_interp[c])])
        temp_interp[c] = np.nan_to_num(temp_interp[c])
    return np.swapaxes(np.asarray(temp_interp), 0, 1)     # swap axes to have [samples, colors, W, H]
def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):
    """
    Builds a VGG style CNN network followed by a fully-connected layer and a softmax layer.
    Stacks are separated by a maxpool layer. Number of kernels in each layer is twice
    the number in previous stack.
    input_var: Theano variable for input to the network
    outputs: pointer to the output of the last layer of network (softmax)

    :param input_var: theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                    value corresponding to the number of layers in each stack.
                    (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each.
    :param n_filters_first: number of filters in the first layer
    :param imsize: Size of the image
    :param n_colors: Number of color channels (depth)
    :return: a tuple (network, weights): pointer to the output of the last layer
             and the list of Conv2D weight tensors (used for weight sharing
             between the parallel per-time-window CNNs).
    """
    weights = []        # Keeps the weights for all layers
    count = 0           # index into w_init, one entry per conv layer
    # If no initial weight is given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(None, n_colors, imsize, imsize),
                         input_var=input_var)
    for i, s in enumerate(n_layers):
        for l in range(s):
            # Filter count doubles with each stack: n_filters_first * 2**i
            network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),
                                  W=w_init[count], pad='same')
            count += 1
            weights.append(network.W)
        # Spatial downsampling between stacks
        network = MaxPool2DLayer(network, pool_size=(2, 2))
    return network, weights
def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with maxpooling layer in time.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights: the first call creates the
    # weights, subsequent calls reuse them via w_init.
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(convnet)
    # convpooling using Max pooling over frames (element-wise max across time)
    convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = lasagne.layers.DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                                         num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights (weights created on first call)
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights (weights created on first call)
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights (weights created on first call)
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                     nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)      # keep only the last time step
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the nb_classes-unit softmax output layer (no dropout here):
    convpool = DenseLayer(convpool,
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """
    Iterates over the samples returning batches of size batchsize.

    :param inputs: input data array. It should be a 4D numpy array for images
                   [n_samples, n_colors, W, H] and 5D numpy array if working with
                   sequence of images [n_timewindows, n_samples, n_colors, W, H].
    :param targets: vector of target labels.
    :param batchsize: Batch size
    :param shuffle: Flag whether to shuffle the samples before iterating or not.
    :return: images and labels for a batch (the last batch may be smaller than
             batchsize if the sample count is not divisible by it)
    """
    if inputs.ndim == 4:
        input_len = inputs.shape[0]
    elif inputs.ndim == 5:
        input_len = inputs.shape[1]
    else:
        # Previously an unsupported ndim fell through and raised a confusing
        # NameError on 'input_len'; fail fast with a clear message instead.
        raise ValueError("inputs must be a 4D or 5D array, got ndim={0}".format(inputs.ndim))
    assert input_len == len(targets)
    if shuffle:
        indices = np.arange(input_len)
        np.random.shuffle(indices)
    for start_idx in range(0, input_len, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        if inputs.ndim == 4:
            yield inputs[excerpt], targets[excerpt]
        elif inputs.ndim == 5:
            # time axis first: batch along the sample axis (axis 1)
            yield inputs[:, excerpt], targets[excerpt]
def train(images, labels, fold, model_type, batch_size=32, num_epochs=5):
    """
    A sample training function which loops over the training set and evaluates the network
    on the validation set after each epoch. Evaluates the network on the training set
    whenever the

    :param images: input images
    :param labels: target labels
    :param fold: tuple of (train, test) index numbers
    :param model_type: model type ('cnn', '1dconv', 'maxpool', 'lstm', 'mix')
    :param batch_size: batch size for training
    :param num_epochs: number of epochs of dataset to go over for training
    :return: average test accuracy of the model selected at the best validation epoch
    """
    num_classes = len(np.unique(labels))
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = reformatInput(images, labels, fold)
    X_train = X_train.astype("float32", casting='unsafe')
    X_val = X_val.astype("float32", casting='unsafe')
    X_test = X_test.astype("float32", casting='unsafe')
    # Prepare Theano variables for inputs and targets
    # (5D symbolic tensor: [n_timewindows, n_samples, n_colors, W, H])
    input_var = T.TensorType('floatX', ((False,) * 5))()
    target_var = T.ivector('targets')
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    # Building the appropriate model
    if model_type == '1dconv':
        network = build_convpool_conv1d(input_var, num_classes)
    elif model_type == 'maxpool':
        network = build_convpool_max(input_var, num_classes)
    elif model_type == 'lstm':
        network = build_convpool_lstm(input_var, num_classes, 100)
    elif model_type == 'mix':
        network = build_convpool_mix(input_var, num_classes, 100)
    elif model_type == 'cnn':
        # Single-frame CNN: replace the 5D input by a plain 4D image tensor.
        input_var = T.tensor4('inputs')
        network, _ = build_cnn(input_var)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=256,
                             nonlinearity=lasagne.nonlinearities.rectify)
        network = DenseLayer(lasagne.layers.dropout(network, p=.5),
                             num_units=num_classes,
                             nonlinearity=lasagne.nonlinearities.softmax)
    else:
        raise ValueError("Model not supported ['1dconv', 'maxpool', 'lstm', 'mix', 'cnn']")
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # L2 weight-decay regularization over all network parameters
    reg_factor = 1e-4
    l2_penalty = regularize_network_params(network, l2) * reg_factor
    loss += l2_penalty
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params, learning_rate=0.001)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    best_validation_accu = 0
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=False):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        av_train_err = train_err / train_batches
        av_val_err = val_err / val_batches
        av_val_acc = val_acc / val_batches
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(av_train_err))
        print("  validation loss:\t\t{:.6f}".format(av_val_err))
        print("  validation accuracy:\t\t{:.2f} %".format(av_val_acc * 100))
        # Model selection: evaluate on the test set (and checkpoint weights)
        # only when validation accuracy improves.
        if av_val_acc > best_validation_accu:
            best_validation_accu = av_val_acc
            # After training, we compute and print the test error:
            test_err = 0
            test_acc = 0
            test_batches = 0
            for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
                inputs, targets = batch
                err, acc = val_fn(inputs, targets)
                test_err += err
                test_acc += acc
                test_batches += 1
            av_test_err = test_err / test_batches
            av_test_acc = test_acc / test_batches
            print("Final results:")
            print("  test loss:\t\t\t{:.6f}".format(av_test_err))
            print("  test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))
            # Dump the network weights to a file like this:
            np.savez('weights_lasg_{0}'.format(model_type), *lasagne.layers.get_all_param_values(network))
    print('-'*50)
    print("Best validation accuracy:\t\t{:.2f} %".format(best_validation_accu * 100))
    print("Best test accuracy:\t\t{:.2f} %".format(av_test_acc * 100))
    return av_test_acc
if __name__ == '__main__':
    from utils import reformatInput

    # Load electrode locations
    print('Loading data...')
    locs = scipy.io.loadmat('../Sample data/Neuroscan_locs_orig.mat')
    locs_3d = locs['A']
    locs_2d = []
    # Convert to 2D
    for e in locs_3d:
        locs_2d.append(azim_proj(e))

    feats = scipy.io.loadmat('../Sample data/FeatureMat_timeWin.mat')['features']
    subj_nums = np.squeeze(scipy.io.loadmat('../Sample data/trials_subNums.mat')['subjectNum'])

    # Leave-Subject-Out cross validation
    fold_pairs = []
    for i in np.unique(subj_nums):
        ts = subj_nums == i
        tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
        ts = np.squeeze(np.nonzero(ts))
        np.random.shuffle(tr)       # Shuffle indices
        np.random.shuffle(ts)
        fold_pairs.append((tr, ts))

    # Number of time windows: each window contributes 192 feature columns.
    # Integer division is required -- under Python 3, '/' returns a float and
    # range(float) raises a TypeError.
    n_windows = feats.shape[1] // 192

    # CNN Mode
    print('Generating images...')
    # Find the average response over time windows
    av_feats = reduce(lambda x, y: x + y, [feats[:, i * 192:(i + 1) * 192] for i in range(n_windows)])
    av_feats = av_feats / n_windows
    images = gen_images(np.array(locs_2d),
                        av_feats,
                        32, normalize=True)
    print('\n')

    # Class labels should start from 0
    print('Training the CNN Model...')
    test_acc_cnn = []
    for i in range(len(fold_pairs)):
        print('fold {0}/{1}'.format(i + 1, len(fold_pairs)))
        test_acc_cnn.append(train(images, np.squeeze(feats[:, -1]) - 1, fold_pairs[i], 'cnn', num_epochs=10))

    # Conv-LSTM Mode
    print('Generating images for all time windows...')
    images_timewin = np.array([gen_images(np.array(locs_2d),
                                          feats[:, i * 192:(i + 1) * 192], 32, normalize=True) for i in
                               range(n_windows)
                               ])
    print('\n')
    print('Training the LSTM-CONV Model...')
    test_acc_mix = []
    for i in range(len(fold_pairs)):
        print('fold {0}/{1}'.format(i+1, len(fold_pairs)))
        test_acc_mix.append(train(images_timewin, np.squeeze(feats[:, -1]) - 1, fold_pairs[i], 'mix', num_epochs=10))

    print('*' * 40)
    print('Average MIX test accuracy: {0}'.format(np.mean(test_acc_mix)*100))
    print('Average CNN test accuracy: {0}'.format(np.mean(test_acc_cnn) * 100))
    print('*' * 40)
    print('Done!')
| gpl-2.0 |
NickC1/skedm | build/lib/skedm/utilities.py | 1 | 9845 | """
Metrics for scoring predictions and also some more specialized
math needed for skedm
"""
import numpy as np
from scipy import stats as stats
from numba import jit
def weighted_mean(X, distances):
    """
    Calculates the weighted mean given a set of values and their corresponding
    distances. Only 1/distance is implemented. This essentially is just a
    weighted mean down axis=1.

    Parameters
    ----------
    X : 2d array
        Training values. shape(nsamples, number near neighbors)
    distances : 2d array
        Sorted distances to the near neighbors for the indices.
        shape(nsamples, number near neighbors)

    Returns
    -------
    w_mean : 1d array
        Weighted predictions, one per sample
    """
    # Small offset keeps the reciprocal finite when a distance is exactly zero.
    shifted = distances + 0.00001
    weights = 1. / shifted
    # Normalize so each row of weights sums to one.
    weights /= np.sum(weights, axis=1, keepdims=True)
    w_mean = np.sum(X * weights, axis=1)
    return w_mean.ravel()
def mi_digitize(X):
    """
    Digitize a time series for mutual information analysis

    Parameters
    ----------
    X : 1D array
        array to be digitized of length m

    Returns
    -------
    Y : 1D array
        digitized array of length m (bin indices starting at 1)
    """
    # Pad the range slightly so the extreme values land inside the outer bins.
    lo = np.min(X) - 1e-5
    hi = np.max(X) + 1e-5
    # Bin count scales with sqrt(m/20), but never fewer than four bins.
    nbins = max(4, int(np.sqrt(len(X) / 20)))
    edges = np.linspace(lo, hi, nbins + 1)      # nbins+1 edges -> nbins bins
    return np.digitize(X, edges)
def corrcoef(preds, actual):
    """
    Correlation Coefficient between predicted values and actual values

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    cc : float
        Pearson correlation coefficient between preds and actual
    """
    # Off-diagonal entry of the 2x2 correlation matrix is the cross-correlation.
    return np.corrcoef(preds, actual)[1, 0]
def classCompare(preds, actual):
    """
    Percent correct between predicted values and actual values

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    cc : float
        fraction of predictions equal to the actual values
    """
    matches = preds == actual
    return np.mean(matches)
def classificationError(preds, actual):
    """
    Percent correct between predicted values and actual values scaled
    to the most common prediction of the space (i.e. accuracy relative
    to a majority-class baseline).

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    cc : float
        accuracy divided by the majority-class frequency
    """
    baseline_class, _ = stats.mode(actual, axis=None)
    accuracy = np.mean(preds == actual)
    baseline = np.mean(actual == baseline_class)
    return accuracy / baseline.astype('float')
def kleckas_tau(preds, actual):
    """
    Calculates kleckas tau: improvement of the classifier over chance
    assignment proportional to the class frequencies.

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    tau : float
        1.0 for perfect classification, 0.0 for chance-level
    """
    n = len(preds)
    n_correct = np.sum(preds == actual)
    # Expected number of correct assignments by chance: sum over classes of
    # (class count) * (class frequency).
    expected = 0
    for category in np.unique(actual):
        n_cat = np.sum(category == actual)
        expected += n_cat * (float(n_cat) / n)
    return (n_correct - expected) / (n - expected)
def cohens_kappa(preds, actual):
    """
    Calculates Cohen's kappa: agreement between the two labelings corrected
    for chance agreement, kappa = (po - pe) / (1 - pe).

    The previous implementation called ``cohen_kappa_score``, which was never
    imported anywhere in this module and therefore raised a NameError; the
    statistic is now computed directly with numpy (unweighted kappa, matching
    sklearn.metrics.cohen_kappa_score's default).

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    c : float
        Cohen's kappa statistic
    """
    preds = np.asarray(preds)
    actual = np.asarray(actual)
    # Observed agreement
    po = np.mean(preds == actual)
    # Chance agreement: product of marginal class frequencies, summed.
    pe = 0.0
    for cat in np.unique(np.concatenate((preds, actual))):
        pe += np.mean(preds == cat) * np.mean(actual == cat)
    if pe == 1.0:
        # Degenerate case: both labelings are the same single class.
        return 1.0
    c = (po - pe) / (1.0 - pe)
    return c
def klekas_tau_spatial(X,max_lag,percent_calc=.5):
    """
    Similar to mutual_information_spatial, it calculates the kleckas tau value
    between a shifted and unshifted slice of the space. It makes slices in both
    the rows and the columns.

    Parameters
    ----------
    X : 2-D array
        input two-dimensional image
    max_lag : integer
        maximum amount to shift the space
    percent_calc : float
        How many rows and columns to use average over. Using the whole space
        is overkill.

    Returns
    -------
    R_mut : 1-D array
        the tau values averaged down the rows (vertical)
    C_mut : 1-D array
        the tau values averaged across the columns (horizontal)
    r_mi : 2-D array
        the tau values down each sampled row (vertical)
    c_mi : 2-D array
        the tau values across each sampled column (horizontal)

    Notes
    -----
    Rows/columns are chosen at random on each call (no fixed seed), so
    repeated calls give slightly different averages.
    """
    rs, cs = np.shape(X)
    rs_iters = int(rs*percent_calc)
    cs_iters = int(cs*percent_calc)
    # Random subsample of row and column indices to keep the cost manageable.
    r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)
    c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)
    # The r_picks are used to calculate the MI in the columns
    # and the c_picks are used to calculate the MI in the rows
    c_mi = np.zeros((max_lag,rs_iters))
    r_mi = np.zeros((max_lag,cs_iters))
    # Horizontal shifts along each sampled row.
    for ii in range(rs_iters):
        m_slice = X[r_picks[ii],:]
        for j in range(max_lag):
            shift = j+1
            new_m = m_slice[:-shift]
            shifted = m_slice[shift:]
            c_mi[j,ii] = kleckas_tau(new_m,shifted)
    # Vertical shifts along each sampled column.
    for ii in range(cs_iters):
        m_slice = X[:,c_picks[ii]]
        for j in range(max_lag):
            shift = j+1
            new_m = m_slice[:-shift]
            shifted = m_slice[shift:]
            r_mi[j,ii] = kleckas_tau(new_m,shifted)
    r_mut = np.mean(r_mi,axis=1)
    c_mut = np.mean(c_mi,axis=1)
    return r_mut, c_mut, r_mi, c_mi
def varianceExplained(preds, actual):
    """
    Ratio of the variance of the residuals (preds - actual) to the variance
    of the actual values. 0.0 means the predictions differ from the actual
    values only by a constant; larger values mean noisier predictions.

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    cc : float
        var(preds - actual) / var(actual)
    """
    residual_var = np.var(preds - actual)
    total_var = np.var(actual)
    return residual_var / total_var
def score(preds, actual):
    """
    The coefficient R^2 is defined as (1 - u/v), where u is the regression
    sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
    sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible
    score is 1.0, lower values are worse.

    Parameters
    ----------
    preds : array shape (num samples,)
    actual : array of shape (num samples,)
        actual values from the testing set

    Returns
    -------
    r2 : float
        coefficient of determination; 0 when the targets are constant
    """
    u = np.square(actual - preds).sum()
    v = np.square(actual - actual.mean()).sum()
    # Guard BEFORE dividing -- the original computed 1 - u/v first, which
    # triggered a divide-by-zero (nan/inf) when the targets were constant.
    if v == 0.:
        print('Targets are all the same. Returning 0.')
        return 0
    return 1 - u / v
def weighted_mode(a, w, axis=0):
    """This function is borrowed from sci-kit learn's extmath.py

    Returns an array of the weighted modal (most common) value in a.
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    This is an extension of the algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))

    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.

    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))

    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a, w = np.ravel(a), np.ravel(w)
        axis = 0
    else:
        a, w = np.asarray(a), np.asarray(w)
    if a.shape != w.shape:
        print('both weights')
        # Broadcast the weights up to the shape of the values.
        w = np.zeros(a.shape, dtype=w.dtype) + w
    # Output shape: same as input with the reduced axis collapsed to 1.
    out_shape = list(a.shape)
    out_shape[axis] = 1
    best_vals = np.zeros(out_shape)
    best_counts = np.zeros(out_shape)
    # For each unique candidate value, sum its weights along the axis and
    # keep a running winner per output position.
    for candidate in np.unique(np.ravel(a)):
        masked = np.zeros(a.shape)
        hit = (a == candidate)
        masked[hit] = w[hit]
        counts = np.expand_dims(np.sum(masked, axis), axis)
        best_vals = np.where(counts > best_counts, candidate, best_vals)
        best_counts = np.maximum(counts, best_counts)
    return best_vals, best_counts
@jit
def quick_mode_axis1(X):
    """
    Takes the mode of an array across the columns. aka axis=1

    X : np.array
        2d array of non-negative integers (bincount requires >= 0);
        returns a 1d array with one mode per row. Ties resolve to the
        smallest value (bincount/argmax behavior).
    """
    X = X.astype(int)
    len_x = len(X)
    mode = np.zeros(len_x)
    for i in range(len_x):
        # argmax of the per-value counts gives the (smallest) most common value
        mode[i] = np.bincount(X[i,:]).argmax()
    return mode
@jit
def quick_mode_axis1_keep_nearest_neigh(X):
    """
    The current implementation of the mode takes the lowest value instead of
    the closest value. For example if the neighbors have values:
    [7,7,2,3,4,1,1] the current implementation will keep 1 as the value. For
    our purposes, the ordering is important, so we want to keep the first value.

    X : 2d array of non-negative ints, one neighbor list per row; returns a
    1d array with the first-occurring modal value of each row.
    """
    X = X.astype(int)
    len_x = len(X)
    mode = np.zeros(len_x)
    for i in range(len_x):
        # Index counts back by the row's own values so argmax finds the
        # position (in row order) of the first element with the max count.
        loc = np.bincount(X[i,:])[X[i,:]].argmax()  #reorder before argmax
        mode[i] = X[i,:][loc]
    return mode
def keep_diversity(X, thresh=1.):
    """
    Flags rows whose values are not dominated by a single class.

    Parameters
    ----------
    X : 2d array of ints
    thresh : float
        Fraction of a row that may equal the row's mode before the row is
        discarded. The default of 1.0 keeps every row that is not entirely
        a single value (the original behavior).

    Returns
    -------
    keep : 1d boolean array

    ex:
    [1 1 1 1]
    [2 1 2 3]
    [2 2 2 2]
    [3 2 1 4]

    returns:
    [F]
    [T]
    [F]
    [T]
    """
    X = X.astype(int)
    mode = quick_mode_axis1(X).reshape(-1, 1)
    compare = np.repeat(mode, X.shape[1], axis=1)
    # Fix: 'thresh' was previously computed but never used -- the comparison
    # hard-coded X.shape[1], silently ignoring the parameter. With the default
    # thresh=1.0 this is identical to the old behavior.
    max_same = int(thresh * X.shape[1])
    keep = np.sum(compare == X, axis=1) < max_same
    return keep
| mit |
exepulveda/swfc | python/spatial_correction_kmean_2d.py | 1 | 2530 | import sys
import random
import logging
import collections
import math
import sys
import json
sys.path += ['..']
import numpy as np
import scipy.stats
from graph_labeling import graph_cut, make_neighbourhood
from scipy.spatial import cKDTree
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from case_study_2d import attributes,setup_case_study,setup_distances
'''
This script if for adpat yhe Kmeans results in order to use spatail correction
'''
CHECK_VALID = False
if __name__ == "__main__":
    # Load the 2D case study: locations, the ore subset, value ranges, etc.
    locations,ore_indices,locations_ore,data_ore,min_values,max_values,scale,var_types,categories = setup_case_study()
    data = data_ore.copy()
    seed = 1634120
    np.random.seed(seed)    # reproducible neighbour sampling / graph cut
    lambda_value = 0.25
    NC = 4                  # number of clusters
    target = False
    force = False
    # Membership matrices produced by the fuzzy clustering runs
    # (ew = equal-weights variant).
    filename_template_ew = "../results/final_2d_{tag}_fcew_4.csv"
    filename_template = "../results/final_2d_{tag}_fc_4.csv"
    #clusters_ew = np.loadtxt(filename_template_ew.format(tag='clusters'),delimiter=",")[:,2]
    #clusters = np.loadtxt(filename_template.format(tag='clusters'),delimiter=",")[:,2]
    best_u = np.loadtxt(filename_template.format(tag='u'),delimiter=",")
    best_u_ew = np.loadtxt(filename_template_ew.format(tag='u'),delimiter=",")
    # Hard cluster labels: argmax over the membership columns.
    cluster_ew = np.argmax(best_u_ew,axis=1)
    cluster = np.argmax(best_u,axis=1)
    N,ND = data.shape
    knn = 8     # neighbours per sample for the spatial graph
    #create neighbourdhood EW
    print("building neighbourhood, location.shape=",locations.shape)
    kdtree = cKDTree(locations_ore)
    neighbourhood,distances = make_neighbourhood(kdtree,locations_ore,knn,max_distance=np.inf)
    distances = np.array(distances)
    #spatial EW: regularize the equal-weights memberships with a graph cut
    verbose_level = 2
    clusters_graph_ew = graph_cut(locations_ore,neighbourhood,best_u_ew,unary_constant=100.0,smooth_constant=80.0,verbose=1)
    for i in range(N):
        print(i,cluster_ew[i],best_u_ew[i],cluster_ew[neighbourhood[i]],clusters_graph_ew[i],sep=',')
    np.savetxt("../results/final_2d_clusters_sfcew_4.csv",clusters_graph_ew,delimiter=",",fmt="%.4f")
    #spatial: same regularization for the weighted memberships
    verbose_level = 2
    clusters_graph = graph_cut(locations_ore,neighbourhood,best_u,unary_constant=100.0,smooth_constant=50.0,verbose=1)
    for i in range(N):
        print(i,cluster[i],best_u[i],cluster[neighbourhood[i]],clusters_graph[i],sep=',')
    np.savetxt("../results/final_2d_clusters_sfc_4.csv",clusters_graph,delimiter=",",fmt="%.4f")
| gpl-3.0 |
kshedstrom/pyroms | examples/cobalt-preproc/Boundary_bio/remap_bdry_bio.py | 1 | 6606 | import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
    # Bare attribute container for the time-axis metadata (long_name, units)
    # that remap_bdry_bio sets and pyroms_toolbox.nc_create_roms_bdry_file
    # reads when creating the boundary file.
    pass
def remap_bdry_bio(argdict, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
    """Remap one biogeochemical tracer from the source grid onto the four
    open boundaries of the destination ROMS grid and write a boundary file.

    argdict -- dict with keys 'tracer' (variable name), 'file' (source
               NetCDF path), 'units', 'longname', 'frame' (time index kt).
    src_grd -- source grid object; provides xrange/yrange sub-sampling
               windows and the z_t depth levels.
    dst_grd -- destination ROMS grid object.
    dmax, cdepth, kk -- flooding parameters forwarded to BGrid_GFDL.flood.
    dst_dir -- directory the boundary NetCDF file is written into.

    Returns the horizontally remapped field only when the tracer is 'eta';
    otherwise returns None.
    """
    # NWGOA3 grid sub-sample
    xrange=src_grd.xrange; yrange=src_grd.yrange
    src_varname = argdict['tracer']
    tracer = src_varname
    src_file = argdict['file']
    units = argdict['units']
    longname = argdict['longname']
    kt = argdict['frame']
    # get time
    # (attributes stored on the nctime namespace class consumed below)
    nctime.long_name = 'time'
    nctime.units = 'days since 1900-01-01 00:00:00'
    # create boundary file (overwrite any stale file from a previous run)
    dst_file = tracer + '.nc'
    dst_file = dst_dir + dst_grd.name + '_bdry_bio_' + dst_file
    print 'Creating boundary file', dst_file
    if os.path.exists(dst_file) is True:
        os.remove(dst_file)
    pyroms_toolbox.nc_create_roms_bdry_file(dst_file, dst_grd, nctime)
    # open boundary file
    nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_64BIT')
    #load var
    cdf = netCDF.Dataset(src_file)
    src_var = cdf.variables[src_varname]
    # correct time to some classic value: mid-month day-of-year for frame kt
    days_in_month = np.array([31,28.25,31,30,31,30,31,31,30,31,30,31])
    time = days_in_month[:kt].sum() + days_in_month[kt] / 2.
    #get missing value
    spval = src_var._FillValue
    # determine variable dimension (spatial rank: time axis is dropped)
    ndim = len(src_var.dimensions) - 1
    # NWGOA3 grid sub-sample: select frame kt and the lateral window
    if ndim == 3:
        src_var = src_var[kt,:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
    elif ndim == 2:
        src_var = src_var[kt,yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
    # Source data lives on tracer (t) points; destination on rho points.
    Bpos = 't'
    Cpos = 'rho'
    z = src_grd.z_t
    Mp, Lp = dst_grd.hgrid.mask_rho.shape
    # Pre-computed SCRIP weights file for the horizontal interpolation.
    wts_file = 'remap_weights_ESM2M_to_NWGOA3_bilinear_t_to_rho.nc'
    # Per-boundary output variable metadata (north/south along xi,
    # east/west along eta).
    dst_varname = tracer
    dst_varname_north = tracer + '_north'
    dimensions_north = ('ocean_time', 's_rho', 'xi_rho')
    long_name_north = longname + ' north boundary condition'
    field_north = tracer + '_north, scalar, series'
    dst_varname_south = tracer + '_south'
    dimensions_south = ('ocean_time', 's_rho', 'xi_rho')
    long_name_south = longname + ' south boundary condition'
    field_south = tracer + '_south, scalar, series'
    dst_varname_east = tracer + '_east'
    dimensions_east = ('ocean_time', 's_rho', 'eta_rho')
    long_name_east = longname + ' east boundary condition'
    field_east = tracer + '_east, scalar, series'
    dst_varname_west = tracer + '_west'
    dimensions_west = ('ocean_time', 's_rho', 'eta_rho')
    long_name_west = longname + ' west boundary condition'
    field_west = tracer + '_west, scalar, series'
    units = units  # NOTE(review): no-op self-assignment, kept for fidelity
    if ndim == 3:
        # build intermediate zgrid: destination grid with the source's
        # (flipped, negated) depth levels, used as the z2roms input grid
        zlevel = -z[::-1]
        nzlevel = len(zlevel)
        dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
        dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
    # create variable in boudary file (one per open boundary)
    print 'Creating variable', dst_varname_north
    nc.createVariable(dst_varname_north, 'f8', dimensions_north, fill_value=spval)
    nc.variables[dst_varname_north].long_name = long_name_north
    nc.variables[dst_varname_north].units = units
    nc.variables[dst_varname_north].field = field_north
    print 'Creating variable', dst_varname_south
    nc.createVariable(dst_varname_south, 'f8', dimensions_south, fill_value=spval)
    nc.variables[dst_varname_south].long_name = long_name_south
    nc.variables[dst_varname_south].units = units
    nc.variables[dst_varname_south].field = field_south
    print 'Creating variable', dst_varname_west
    nc.createVariable(dst_varname_west, 'f8', dimensions_west, fill_value=spval)
    nc.variables[dst_varname_west].long_name = long_name_west
    nc.variables[dst_varname_west].units = units
    nc.variables[dst_varname_west].field = field_west
    print 'Creating variable', dst_varname_east
    nc.createVariable(dst_varname_east, 'f8', dimensions_east, fill_value=spval)
    nc.variables[dst_varname_east].long_name = long_name_east
    nc.variables[dst_varname_east].units = units
    nc.variables[dst_varname_east].field = field_east
    # remapping
    print 'remapping', dst_varname, 'from', src_grd.name, \
        'to', dst_grd.name
    if ndim == 3:
        # flood the grid: extrapolate ocean values into land/masked cells so
        # interpolation near the coast does not pick up fill values
        print 'flood the grid'
        src_varz = pyroms_toolbox.BGrid_GFDL.flood(src_var, src_grd, Bpos=Bpos, spval=spval, \
                                dmax=dmax, cdepth=cdepth, kk=kk)
    else:
        src_varz = src_var
    # horizontal interpolation using scrip weights
    print 'horizontal interpolation using scrip weights'
    dst_varz = pyroms.remapping.remap(src_varz, wts_file, spval=spval)
    if ndim == 3:
        # vertical interpolation from standard z level to sigma, restricted
        # to the one-cell-wide strip along each boundary
        print 'vertical interpolation from standard z level to sigma'
        dst_var_north = pyroms.remapping.z2roms(dst_varz[::-1, Mp-1:Mp, 0:Lp], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,Lp), jrange=(Mp-1,Mp))
        dst_var_south = pyroms.remapping.z2roms(dst_varz[::-1, 0:1, :], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,Lp), jrange=(0,1))
        dst_var_east = pyroms.remapping.z2roms(dst_varz[::-1, :, Lp-1:Lp], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(Lp-1,Lp), jrange=(0,Mp))
        dst_var_west = pyroms.remapping.z2roms(dst_varz[::-1, :, 0:1], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,1), jrange=(0,Mp))
    else:
        # 2D field: just slice the boundary rows/columns
        dst_var_north = dst_varz[-1, :]
        dst_var_south = dst_varz[0, :]
        dst_var_east = dst_varz[:, -1]
        dst_var_west = dst_varz[:, 0]
    # write data in destination file
    print 'write data in destination file\n'
    nc.variables['ocean_time'][0] = time
    nc.variables['ocean_time'].cycle_length = 365.25  # climatological year
    nc.variables[dst_varname_north][0] = np.squeeze(dst_var_north)
    nc.variables[dst_varname_south][0] = np.squeeze(dst_var_south)
    nc.variables[dst_varname_east][0] = np.squeeze(dst_var_east)
    nc.variables[dst_varname_west][0] = np.squeeze(dst_var_west)
    # close file
    nc.close()
    cdf.close()
    if src_varname == 'eta':
        return dst_varz
| bsd-3-clause |
sara-02/fabric8-analytics-stack-analysis | analytics_platform/kronos/pgm/src/offline_training.py | 1 | 6130 | """Functions to perform offline training for Kronos PGM."""
import sys
import time
import os
from analytics_platform.kronos.src import config
import analytics_platform.kronos.pgm.src.pgm_constants as pgm_constants
from analytics_platform.kronos.pgm.src.pgm_pomegranate import PGMPomegranate
from util.analytics_platform_util import get_path_names
from util.data_store.s3_data_store import S3DataStore
def load_eco_to_kronos_dependency_dict(input_kronos_dependency_data_store, additional_path):
    """Load the Kronos dependency dictionary from the selected storage.

    Scans the KD output folder under ``additional_path`` and returns a
    mapping {ecosystem: kronos_dependency_dict}.
    """
    kd_folder = os.path.join(additional_path, pgm_constants.KD_OUTPUT_FOLDER)
    eco_to_kronos_dependency_dict = {}
    for filename in input_kronos_dependency_data_store.list_files(kd_folder):
        # ".../<prefix>_<ecosystem>.json" -> "<ecosystem>"
        ecosystem = filename.split("/")[-1].split(".")[0].split("_")[-1]
        kd_json = input_kronos_dependency_data_store.read_json_file(filename=filename)
        eco_to_kronos_dependency_dict[ecosystem] = dict(kd_json)
    return eco_to_kronos_dependency_dict
def load_eco_to_kronos_dependency_dict_s3(bucket_name, additional_path):
    """Load the Kronos dependency dictionary from the AWS S3 storage."""
    s3_store = S3DataStore(src_bucket_name=bucket_name,
                           access_key=config.AWS_S3_ACCESS_KEY_ID,
                           secret_key=config.AWS_S3_SECRET_ACCESS_KEY)
    return load_eco_to_kronos_dependency_dict(
        input_kronos_dependency_data_store=s3_store,
        additional_path=additional_path)
def load_user_eco_to_co_occerrence_matrix_dict(input_co_occurrence_data_store, additional_path):
    """Create the co-occurrence matrix mapping from user categories.

    Reads every matrix file under the COM output folder and returns a nested
    dict {user_category: {ecosystem: co_occurrence_dataframe}}.
    """
    com_folder = os.path.join(additional_path, pgm_constants.COM_OUTPUT_FOLDER)
    matrix_by_pair = {}
    categories = []   # insertion-ordered, duplicates skipped
    ecosystems = []
    for path in input_co_occurrence_data_store.list_files(com_folder):
        # ".../<category>/<prefix>_<ecosystem>.json"
        parts = path.split("/")
        category = parts[-2]
        ecosystem = parts[-1].split(".")[0].split("_")[-1]
        if category not in categories:
            categories.append(category)
        if ecosystem not in ecosystems:
            ecosystems.append(ecosystem)
        matrix_by_pair[(category, ecosystem)] = \
            input_co_occurrence_data_store.read_json_file_into_pandas_df(path)
    # Re-shape the flat (category, ecosystem) mapping into the nested dict;
    # a KeyError here means some category/ecosystem combination had no file.
    return {
        category: {eco: matrix_by_pair[(category, eco)] for eco in ecosystems}
        for category in categories
    }
def train_and_save_kronos_list(input_kronos_dependency_data_store, input_co_occurrence_data_store,
                               output_data_store, additional_path):
    """Train one Kronos PGM per (user category, ecosystem) pair and save each.

    Models are written as "<KRONOS_OUTPUT_FOLDER>/<category>/kronos_<eco>.json"
    under ``additional_path`` in ``output_data_store``.
    """
    eco_to_kd = load_eco_to_kronos_dependency_dict(
        input_kronos_dependency_data_store=input_kronos_dependency_data_store,
        additional_path=additional_path)
    user_matrices = load_user_eco_to_co_occerrence_matrix_dict(
        input_co_occurrence_data_store=input_co_occurrence_data_store,
        additional_path=additional_path)
    for user_category, eco_matrices in user_matrices.items():
        for ecosystem, co_occurrence_df in eco_matrices.items():
            model = PGMPomegranate.train(kronos_dependency_dict=eco_to_kd[ecosystem],
                                         package_occurrence_df=co_occurrence_df)
            out_name = os.path.join(pgm_constants.KRONOS_OUTPUT_FOLDER, str(user_category),
                                    "kronos_{}.json".format(str(ecosystem)))
            model.save(data_store=output_data_store,
                       filename=additional_path + out_name)
def train_and_save_kronos_list_s3(training_data_url):
    """Train the Kronos models reading from / writing to AWS S3."""
    input_bucket_name, output_bucket_name, additional_path = get_path_names(
        training_data_url)
    # All stores share the same credentials; only the bucket differs.
    creds = dict(access_key=config.AWS_S3_ACCESS_KEY_ID,
                 secret_key=config.AWS_S3_SECRET_ACCESS_KEY)
    kd_store = S3DataStore(src_bucket_name=input_bucket_name, **creds)
    com_store = S3DataStore(src_bucket_name=input_bucket_name, **creds)
    out_store = S3DataStore(src_bucket_name=output_bucket_name, **creds)
    train_and_save_kronos_list(
        input_kronos_dependency_data_store=kd_store,
        input_co_occurrence_data_store=com_store,
        output_data_store=out_store, additional_path=additional_path)
| gpl-3.0 |
DerThorsten/seglib | seglibpython/seglib/clustering/ce_multicut.py | 1 | 7168 | from seglib import cgp2d
from seglib.preprocessing import norm01
import opengm
import numpy
import vigra
from sklearn.cluster import Ward,WardAgglomeration
class CgpClustering(object):
    """Base class for clusterings of the 2-cells (regions) of a CGP.

    ``labels`` holds one integer cluster label per region; subclasses are
    expected to fill it in.
    """
    def __init__(self,cgp):
        # cgp : the cgp2d topological grid this clustering operates on
        self.cgp = cgp
        # one label per 2-cell (region)
        self.labels = numpy.zeros(self.cgp.numCells(2),dtype=numpy.uint64)
class HierarchicalClustering(CgpClustering):
    """Ward-linkage agglomerative clustering of CGP regions, constrained so
    that only spatially adjacent regions may merge."""
    def __init__(self,cgp):
        super(HierarchicalClustering, self).__init__(cgp)
        # sparse region-adjacency matrix used as the connectivity constraint
        self.connectivity = cgp.sparseAdjacencyMatrix()
    def segment(self,features,nClusters):
        """Cluster the regions into ``nClusters`` groups and store the labels.

        ``features`` is transposed before fitting -- presumably shaped
        (n_features, n_regions); TODO confirm against callers.
        NOTE(review): WardAgglomeration was removed from modern scikit-learn
        (FeatureAgglomeration is the current equivalent), so this requires
        the old sklearn API imported at the top of the file.
        """
        #print "features",features.shape
        #print "self.connectivity",self.connectivity.shape
        self.ward = WardAgglomeration(n_clusters=nClusters, connectivity=self.connectivity).fit(features.T)
        self.labels[:] = self.ward.labels_
    def mergedCgp(self):
        """Return (cgp, tgrid) rebuilt after merging regions that share a label."""
        # project the labels back onto the image plane, then re-extract the CGP
        # (+1 because label images must be positive)
        newLabels = self.cgp.featureToImage(cellType=2,features=self.labels.astype(numpy.float32),useTopologicalShape=False)
        cgp,tgrid = cgp2d.cgpFromLabels(newLabels.astype(numpy.uint64)+1)
        return cgp,tgrid
def probabilityToWeights(p1, out, beta=0.5):
    """Convert boundary-on probabilities into additive log-odds weights.

    Writes log(p0/p1) + log((1-beta)/beta) into ``out`` in place (p0 = 1-p1)
    and returns it. ``beta`` biases the decision threshold: beta=0.5 adds no
    offset. Assumes 0 < p1 < 1 elementwise.
    """
    assert len(out) == len(p1)
    odds = (1.0 - p1) / p1
    out[:] = numpy.log(odds) + numpy.log((1.0 - beta) / beta)
    return out
def sampleFromGauss(mean, std, out):
    """Draw independent Gaussian samples, one per entry of ``mean``/``std``.

    Fills ``out`` in place with ``mean + std * N(0,1)`` draws and returns it,
    mirroring the in/out convention of probabilityToWeights.

    Bug fixed: ``out`` was previously length-checked but never written to --
    a freshly allocated array was returned instead, so callers relying on
    the output argument saw stale data. The returned values are unchanged.
    """
    assert len(mean) == len(std)
    assert len(out) == len(mean)
    n = len(mean)
    samples = numpy.random.standard_normal(n)
    samples *= std
    samples += mean
    out[:] = samples
    return out
def gaussOffset(mean, std):
    """Draw a single float sample from N(mean, std**2)."""
    noise = numpy.random.standard_normal(1).item()
    return std * noise + mean
def gradientToWeight(gradient, gamma):
    """Map an edge-gradient magnitude to a multicut weight in (-1, 1].

    Treats exp(-gamma * gradient) as a pseudo-probability that a boundary is
    "off" side-by-side with its complement, and returns their difference:
    strong gradients give weights near -1, zero gradient gives +1.
    """
    p_keep = numpy.exp(-gamma * gradient)
    p_cut = 1.0 - p_keep
    return p_keep - p_cut
def imgToWeight(cgp,img,gamma,method='exp'):
    """Turn an edge-indicator image into per-boundary multicut weights.

    The image is resized to the CGP's shape when needed, normalised into
    roughly [0.09, 1], averaged over each 1-cell (boundary segment), and
    mapped to weights in (-1, 1) via gradientToWeight.

    Raises RuntimeError for any ``method`` other than 'exp'.
    """
    if tuple(cgp.shape)!=(img.shape):
        # NOTE(review): the right-hand parentheses are not a tuple literal --
        # this compares tuple(cgp.shape) with img.shape as-is; confirm
        # img.shape is a plain tuple for the image types used here.
        img=vigra.sampling.resize(img,cgp.shape)
    # shift/scale so the accumulated means stay strictly positive
    img =norm01(img)+0.1
    img/=1.1
    accgrad = cgp.accumulateCellFeatures(cellType=1,image=img,features='Mean')[0]['Mean']
    if method =='exp':
        weights = gradientToWeight(gradient=accgrad,gamma=gamma)
        return weights
    else :
        raise RuntimeError("not impl")
def multicutFromCgp(cgp, weights=None, parameter=None):
    """Build an OpenGM Potts model over the CGP regions and a CGC solver.

    One variable per 2-cell (region), one pairwise Potts factor per 1-cell
    (boundary). ``weights`` (one per boundary) penalise disagreement; when
    None, all factors are zero. Returns (cgc_inference, graphical_model).
    """
    edges = cgp.cell1BoundsArray() - 1   # 0-based region pairs per boundary
    n_var = cgp.numCells(2)
    n_fac = cgp.numCells(1)
    label_space = numpy.ones(n_var, dtype=opengm.label_type) * n_var
    gm = opengm.gm(label_space)
    zeros = numpy.zeros(n_fac, dtype=opengm.value_type)
    if weights is None:
        potts = opengm.pottsFunctions([n_var, n_var], zeros, zeros)
    else:
        w = numpy.require(weights, dtype=opengm.value_type)
        potts = opengm.pottsFunctions([n_var, n_var], zeros, w)
    gm.addFactors(gm.addFunctions(potts), edges)
    cgc = opengm.inference.Cgc(gm=gm, parameter=parameter)
    return cgc, gm
def multicutFromCgp2(cgp, e0, e1, parameter=None):
    """Build an OpenGM Potts model with explicit per-boundary energies.

    Like multicutFromCgp but takes both the equal-label energy ``e0`` and
    the different-label energy ``e1`` (one value per 1-cell).
    Returns (cgc_inference, graphical_model).
    """
    edges = cgp.cell1BoundsArray() - 1   # 0-based region pairs per boundary
    n_var = cgp.numCells(2)
    n_fac = cgp.numCells(1)  # kept for parity with multicutFromCgp
    label_space = numpy.ones(n_var, dtype=opengm.label_type) * n_var
    gm = opengm.gm(label_space)
    potts = opengm.pottsFunctions([n_var, n_var], e0, e1)
    gm.addFactors(gm.addFunctions(potts), edges)
    cgc = opengm.inference.Cgc(gm=gm, parameter=parameter)
    return cgc, gm
class AggloCut(object):
    """Agglomerative multicut segmentation: repeatedly solves a CGC multicut
    on the current CGP and merges the least-certain boundary, rebuilding the
    CGP each round (Python 2 code)."""
    def __init__(self,initCgp,edgeImage,featureImage,rgbImage,siftImage,histImage):
        # initial CGP and the per-pixel feature images accumulated each round
        self.initCgp = initCgp
        self.edgeImage = edgeImage
        self.featureImage = featureImage
        self.rgbImage = rgbImage
        self.siftImage = siftImage
        self.histImage = histImage
        #
        # the CGP updated in-place across infer() iterations
        self.iterCgp = initCgp
    def infer(self,gammas,deleteN):
        """Run the agglomerative loop for each gamma in ``gammas``.

        NOTE(review): the while-loop below has no break/termination
        condition other than `continue`; presumably it runs until merging
        fails or an exception occurs -- confirm intended stopping rule.
        """
        cgp2d.visualize(self.rgbImage,cgp=self.iterCgp)
        for gamma in gammas:
            # get the weights for this gamma
            #weights = gradientToWeight(self.edgeImage,gamma)
            #w=e1-e0
            cuts=True
            while(True):
                # per-boundary mean edge strength and per-region appearance
                # features, re-accumulated on the current (merged) CGP
                edge = self.iterCgp.accumulateCellFeatures(cellType=1,image=self.edgeImage,features='Mean')[0]['Mean']
                feat = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.featureImage,features='Mean')[0]['Mean']
                sift = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.siftImage,features='Mean')[0]['Mean']
                hist = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.histImage,features='Mean')[0]['Mean']
                # region-pair dissimilarities projected onto the boundaries
                featDiff = numpy.sqrt(self.iterCgp.cell2ToCell1Feature(feat,mode='l2'))/10.0
                siftDiff = (self.iterCgp.cell2ToCell1Feature(sift,mode='chi2'))*10
                histDiff = (self.iterCgp.cell2ToCell1Feature(hist,mode='chi2'))*10
                print 'featMax',featDiff.min(),featDiff.max()
                print 'edgeMax',edge.min(),edge.max()
                print 'sift',siftDiff.min(),siftDiff.max()
                print 'hist',histDiff.min(),histDiff.max()
                # hand-tuned linear combination of the cues
                edge+=0.1*featDiff
                edge+=1.0*siftDiff
                edge+=3.0*histDiff
                cuts=False
                # Potts energies: e1 = cost of cutting, e0 = cost of keeping
                e1=numpy.exp(-gamma*edge)
                e0=1.0-e1
                # weight each boundary's energies by its length (pixel count)
                for ci in range(self.iterCgp.numCells(1)):
                    size = len(self.iterCgp.cells1[ci].points)
                    #print size
                    e0[ci]*=float(size)
                    e1[ci]*=float(size)
                # boundaries with no bounding junctions (closed loops) get a
                # strong keep-cut penalty
                for ci in range(self.iterCgp.numCells(1)):
                    bb = len(self.iterCgp.cells1[ci].boundedBy)
                    if bb==0 :
                        print "ZERO BOUNDS \n\n"
                        #e0[ci]*=float(size)
                        e1[ci]+=2.0
                # discourage cutting around small regions
                # NOTE(review): this indexes cells1 with a 2-cell index --
                # likely should be len(self.iterCgp.cells2[ci].points); confirm.
                for ci in range(self.iterCgp.numCells(2)):
                    size = len(self.iterCgp.cells1[ci].points)
                    if size<=200 :
                        boundedBy=numpy.array(self.iterCgp.cells2[ci].boundedBy)-1
                        e1[boundedBy]+=2.0
                w = e1-e0
                if True:
                    # solve the multicut with min-marginals enabled
                    cgc,gm = multicutFromCgp2(cgp=self.iterCgp,e0=e0,e1=e1,parameter=opengm.InfParam(planar=True,inferMinMarginals=True))
                    deleteN = 1#2*int(float(self.iterCgp.numCells(1))**(0.5)+0.5)
                    #cgc.infer(cgc.verboseVisitor())
                    cgc.infer()
                    argDual = cgc.argDual()
                    if(argDual.min()==1):
                        # every boundary stayed active: soften gamma and retry
                        print "READ GAMMA"
                        gamma*=0.9
                        continue
                    else:
                        cuts=True
                        #cgp2d.visualize(self.rgbImage,cgp=self.iterCgp,edge_data_in=argDual.astype(numpy.float32))
                        # per-boundary min-marginals -> on/off probabilities
                        factorMinMarginals = cgc.factorMinMarginals()
                        m0 = factorMinMarginals[:,0].astype(numpy.float128)
                        m1 = factorMinMarginals[:,1].astype(numpy.float128)
                        m0*=-1.0
                        m1*=-1.0
                        p0 = numpy.exp(m0)/(numpy.exp(m0)+numpy.exp(m1))
                        p1 = numpy.exp(m1)/(numpy.exp(m0)+numpy.exp(m1))
                        #cgp2d.visualize(self.rgbImage,cgp=self.iterCgp,edge_data_in=p1.astype(numpy.float32))
                        whereOn = numpy.where(argDual==1)
                        nOn = len(whereOn[0])
                        nOff = len(p0)-nOn
                        print "nOn",nOn,"off",nOff
                        # push active boundaries out of the deletion ranking
                        p1[whereOn]+=100.0
                        sortedIndex = numpy.argsort(p1)
                        # NOTE(review): toDelete stays 1 unless deleteN > nOff;
                        # an `else: toDelete = deleteN` may have been intended.
                        toDelete = 1
                        if deleteN > nOff:
                            toDelete = nOff
                        # state 0 = boundary removed (regions merged)
                        cellStates = numpy.ones(self.iterCgp.numCells(1),dtype=numpy.uint32)
                        cellStates[sortedIndex[:toDelete]]=0
                        #cellStates[numpy.argmax(w)]=0
                        print "argmax"
                else :
                    # unreachable (guarded by `if True:`), kept for fidelity
                    cellStates = numpy.ones(self.iterCgp.numCells(1),dtype=numpy.uint32)
                    #cellStates[sortedIndex[:toDelete]]=0
                    cellStates[numpy.argmax(w)]=0
                if self.iterCgp.numCells(2)<50:
                    cgp2d.visualize(self.rgbImage,cgp=self.iterCgp)
                print "merge cells",self.iterCgp.numCells(2),self.iterCgp.numCells(1)
                # apply the merge and rebuild the CGP for the next round
                newtgrid = self.iterCgp.merge2Cells(cellStates)
                self.iterCgp = cgp2d.Cgp(newtgrid)
class CeMc(object):
def __init__(self,cgp):
self.cgp=cgp | mit |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tests/test_format.py | 1 | 81984 | from __future__ import print_function
# -*- coding: utf-8 -*-
from pandas.compat import range, zip, lrange, StringIO, PY3, lzip, u
import pandas.compat as compat
import itertools
import os
import sys
from textwrap import dedent
import warnings
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series, Index, _np_version_under1p7, Timestamp
import pandas.core.format as fmt
import pandas.util.testing as tm
from pandas.util.terminal import get_terminal_size
import pandas
import pandas.tslib as tslib
import pandas as pd
from pandas.core.config import (set_option, get_option,
option_context, reset_option)
from datetime import datetime
_frame = DataFrame(tm.getSeriesData())
def curpath():
    """Return the directory containing this test module."""
    return os.path.split(os.path.abspath(__file__))[0]
def has_info_repr(df):
    """True if repr(df) is the summary ("info") view rather than the table."""
    first_line = repr(df).split('\n')[0]
    return first_line.startswith("<class")
def has_horizontally_truncated_repr(df):
    """True if any line of repr(df) ends with the '...' column-elision marker."""
    for line in repr(df).splitlines():
        if line.strip().endswith('...'):
            return True
    return False
def has_vertically_truncated_repr(df):
    """True if the row-elision marker ('..') shows three lines from the end
    of repr(df)."""
    lines = repr(df).splitlines()
    return '..' in lines[-3]
def has_truncated_repr(df):
    """True if the frame repr is truncated in either direction."""
    if has_horizontally_truncated_repr(df):
        return True
    return has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
    """True if the frame repr is truncated in both directions."""
    if not has_horizontally_truncated_repr(df):
        return False
    return has_vertically_truncated_repr(df)
def has_expanded_repr(df):
    """True if repr(df) wraps onto continuation lines (a line ends with '\\')."""
    return any(line.endswith('\\') for line in repr(df).split('\n'))
def skip_if_np_version_under1p7():
    """Skip the calling test unless numpy >= 1.7 is available.

    Relies on the module-level ``_np_version_under1p7`` flag imported from
    pandas and on nose's SkipTest mechanism.
    """
    if not _np_version_under1p7:
        return
    import nose
    raise nose.SkipTest('numpy >= 1.7 required')
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
class TestDataFrameFormatting(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def tearDown(self):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.ix[5] = 0
fmt.set_eng_float_format()
result = repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
fmt.reset_option('^display.')
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(max_len - 1,
max_len + 1)) for i in range(10)]})
r = repr(df)
r = r[r.find('\n') + 1:]
_strlen = fmt._strlen_func()
for line, value in lzip(r.split('\n'), df['B']):
if _strlen(value) + 1 > max_len:
self.assert_('...' in line)
else:
self.assert_('...' not in line)
with option_context("display.max_colwidth", 999999):
self.assert_('...' not in repr(df))
with option_context("display.max_colwidth", max_len + 2):
self.assert_('...' not in repr(df))
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5],[0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1\n\n[2 rows x 2 columns]')
with option_context("display.chop_threshold", 0.2 ):
self.assertEqual(repr(df), ' 0 1\n0 0.0 0.5\n1 0.5 0.0\n\n[2 rows x 2 columns]')
with option_context("display.chop_threshold", 0.6 ):
self.assertEqual(repr(df), ' 0 1\n0 0 0\n1 0 0\n\n[2 rows x 2 columns]')
with option_context("display.chop_threshold", None ):
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1\n\n[2 rows x 2 columns]')
def test_repr_obeys_max_seq_limit(self):
import pandas.core.common as com
with option_context("display.max_seq_items",2000):
self.assertTrue(len(com.pprint_thing(lrange(1000))) > 1000)
with option_context("display.max_seq_items",5):
self.assertTrue(len(com.pprint_thing(lrange(1000)))< 100)
def test_repr_is_valid_construction_code(self):
import pandas as pd
# for the case of Index, where the repr is traditional rather then stylized
idx = pd.Index(['a','b'])
res = eval("pd."+repr(idx))
tm.assert_series_equal(Series(res),Series(idx))
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"),
u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
self.assertTrue('\\' not in repr(df))
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
df_wide = DataFrame('hello', [0], lrange(10))
df_tall = DataFrame('hello', lrange(30), lrange(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10,
'display.width',20,
'display.max_rows', 20):
with option_context('display.expand_frame_repr', True):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_truncated_repr(df_wide))
self.assertTrue(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertTrue(has_expanded_repr(df_tall))
with option_context('display.expand_frame_repr', False):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_horizontally_truncated_repr(df_wide))
self.assertFalse(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertFalse(has_expanded_repr(df_tall))
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame('hello', lrange(1000), lrange(5))
with option_context('mode.sim_interactive', False,
'display.width', 0,
'display.height', 0,
'display.max_rows',5000):
self.assertFalse(has_truncated_repr(df))
self.assertFalse(has_expanded_repr(df))
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
raise nose.SkipTest("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
self.assertFalse(has_expanded_repr(mkframe(4)))
self.assertFalse(has_expanded_repr(mkframe(5)))
self.assertFalse(has_expanded_repr(df6))
self.assertTrue(has_doubly_truncated_repr(df6))
with option_context('display.max_rows', 20,
'display.max_columns', 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
self.assertFalse(has_expanded_repr(df6))
self.assertFalse(has_truncated_repr(df6))
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# out vertical bounds can not result in exanded repr
self.assertFalse(has_expanded_repr(df10))
self.assertTrue(has_vertically_truncated_repr(df10))
# width=None in terminal, auto detection
with option_context('display.max_columns', 100,
'display.max_rows', term_width * 20,
'display.width', None):
df = mkframe((term_width // 7) - 2)
self.assertFalse(has_expanded_repr(df))
df = mkframe((term_width // 7) + 2)
print( df._repr_fits_horizontal_())
self.assertTrue(has_expanded_repr(df))
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = [u('\u03c3')] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except:
pass
if not line.startswith('dtype:'):
self.assert_(len(line) == line_len)
# it works even if sys.stdin in None
_stdin= sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = self.frame.to_string()
tm.assert_isinstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
with option_context('display.max_rows', 1):
df = pd.DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({u('c/\u03c3'): []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x%x' % x),
('float', lambda x: '[% 4.1f]' % x),
('object', lambda x: '-%s-' % str(x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=lzip(*formatters)[1])
self.assertEqual(result, (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-'))
self.assertEqual(result, result2)
def test_to_string_with_formatters_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
result = df.to_string(formatters={u('c/\u03c3'):
lambda x: '%s' % x})
self.assertEqual(result, u(' c/\u03c3\n') +
'0 1\n1 2\n2 3')
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({u('c/\u03c3'): Series()})
nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
self.assertTrue(c10 < c20 < c30)
def test_to_html_with_col_space(self):
def check_with_width(df, col_space):
import re
# check that col_space affects HTML generation
# and be very brittle about it.
html = df.to_html(col_space=col_space)
hdrs = [x for x in html.split("\n") if re.search("<th[>\s]", x)]
self.assertTrue(len(hdrs) > 0)
for h in hdrs:
self.assertTrue("min-width" in h)
self.assertTrue(str(col_space) in h)
df = DataFrame(np.random.random(size=(1, 3)))
check_with_width(df, 30)
check_with_width(df, 50)
def test_to_html_with_empty_string_label(self):
# GH3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
res = df.to_html()
self.assertTrue("rowspan" not in res)
def test_to_html_unicode(self):
# it works!
df = DataFrame({u('\u03c3'): np.arange(10.)})
df.to_html()
df = DataFrame({'A': [u('\u03c3')]})
df.to_html()
def test_to_html_escaped(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<type 'str'>",
b: "<type 'str'>"},
'co>l2':{a: "<type 'str'>",
b: "<type 'str'>"}}
rs = pd.DataFrame(test_dict).to_html()
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &amp;</th>
<td> <type 'str'></td>
<td> <type 'str'></td>
</tr>
<tr>
<th>stri>ng2 &amp;</th>
<td> <type 'str'></td>
<td> <type 'str'></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_escape_disabled(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<b>bold</b>",
b: "<b>bold</b>"},
'co>l2': {a: "<b>bold</b>",
b: "<b>bold</b>"}}
rs = pd.DataFrame(test_dict).to_html(escape=False)
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &</th>
<td> <b>bold</b></td>
<td> <b>bold</b></td>
</tr>
<tr>
<th>stri>ng2 &</th>
<td> <b>bold</b></td>
<td> <b>bold</b></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_multiindex_sparsify_false_multi_sparse(self):
    """With display.multi_sparse off, repeated MultiIndex labels are
    rendered on every row instead of being merged (no rowspan)."""
    with option_context('display.multi_sparse', False):
        index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
                                          names=['foo', None])
        df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
        result = df.to_html()
        expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td> 0</td>
<td> 1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td> 2</td>
<td> 3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td> 4</td>
<td> 5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td> 6</td>
<td> 7</td>
</tr>
</tbody>
</table>"""
        self.assertEquals(result, expected)
        # same frame but also with MultiIndex columns
        df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
                       columns=index[::2], index=index)
        result = df.to_html()
        expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td> 0</td>
<td> 1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td> 2</td>
<td> 3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td> 4</td>
<td> 5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td> 6</td>
<td> 7</td>
</tr>
</tbody>
</table>"""
        self.assertEquals(result, expected)
def test_to_html_multiindex_sparsify(self):
    """Default sparsified rendering merges repeated MultiIndex labels
    using rowspan/valign on the first occurrence."""
    index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
                                      names=['foo', None])
    df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
    result = df.to_html()
    expected = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td> 0</td>
<td> 1</td>
</tr>
<tr>
<th>1</th>
<td> 2</td>
<td> 3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td> 4</td>
<td> 5</td>
</tr>
<tr>
<th>1</th>
<td> 6</td>
<td> 7</td>
</tr>
</tbody>
</table>"""
    self.assertEquals(result, expected)
    # same frame but also with MultiIndex columns
    df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
                   columns=index[::2], index=index)
    result = df.to_html()
    expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td> 0</td>
<td> 1</td>
</tr>
<tr>
<th>1</th>
<td> 2</td>
<td> 3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td> 4</td>
<td> 5</td>
</tr>
<tr>
<th>1</th>
<td> 6</td>
<td> 7</td>
</tr>
</tbody>
</table>"""
    self.assertEquals(result, expected)
def test_to_html_index_formatter(self):
    """A '__index__' entry in formatters maps index values through the
    callable before rendering."""
    df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
                   columns=['foo', None], index=lrange(4))
    f = lambda x: 'abcd'[x]
    result = df.to_html(formatters={'__index__': f})
    expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>foo</th>
<th>None</th>
</tr>
</thead>
<tbody>
<tr>
<th>a</th>
<td> 0</td>
<td> 1</td>
</tr>
<tr>
<th>b</th>
<td> 2</td>
<td> 3</td>
</tr>
<tr>
<th>c</th>
<td> 4</td>
<td> 5</td>
</tr>
<tr>
<th>d</th>
<td> 6</td>
<td> 7</td>
</tr>
</tbody>
</table>"""
    self.assertEquals(result, expected)
def test_to_html_regression_GH6098(self):
    """GH6098: HTML repr of a pivot table with unicode keys must not raise."""
    df = DataFrame({u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')],
                    u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')],
                    'données1': np.random.randn(5),
                    'données2': np.random.randn(5)})
    # it works
    df.pivot_table(rows=[u('clé1')], cols=[u('clé2')])._repr_html_()
def test_nonunicode_nonascii_alignment(self):
    """Rows containing non-ASCII byte strings must still line up."""
    df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
    rep_str = df.to_string()
    lines = rep_str.split('\n')
    # assertEqual reports both lengths on failure, unlike the
    # deprecated truthiness-only self.assert_ used before.
    self.assertEqual(len(lines[1]), len(lines[2]))
def test_unicode_problem_decoding_as_ascii(self):
    """Converting a repr with a unicode column name must not raise."""
    frame = DataFrame({u('c/\u03c3'): Series({'test': np.NaN})})
    compat.text_type(frame.to_string())
def test_string_repr_encoding(self):
    """repr of latin-1 encoded CSV data must not raise (smoke test)."""
    path = tm.get_data_path('unicode_series.csv')
    frame = pandas.read_csv(path, header=None, encoding='latin1')
    repr(frame)
    repr(frame[1])
def test_repr_corner(self):
    """Representing infs poses no problems."""
    df = DataFrame({'foo': np.inf * np.empty(10)})
    # the repr call itself is the assertion -- it must not raise;
    # the previously unused `foo = repr(df)` binding is dropped.
    repr(df)
def test_frame_info_encoding(self):
    """Truncated repr must cope with quoted / odd index labels."""
    index = ['\'Til There Was You (1997)',
             'ldum klaka (Cold Fever) (1994)']
    # option_context restores display.max_rows even if a repr raises,
    # instead of unconditionally resetting it to a hard-coded 200
    # (which leaked state into subsequent tests on failure).
    with option_context('display.max_rows', 1):
        df = DataFrame(columns=['a', 'b', 'c'], index=index)
        repr(df)
        repr(df.T)
def test_pprint_thing(self):
    """pprint_thing unicode/quoting behaviour (Python 2 only)."""
    import nose
    from pandas.core.common import pprint_thing as pp_t
    if PY3:
        # u'...' reprs differ on py3; this test only makes sense on py2
        raise nose.SkipTest("doesn't work on Python 3")
    self.assertEquals(pp_t('a') , u('a'))
    self.assertEquals(pp_t(u('a')) , u('a'))
    self.assertEquals(pp_t(None) , 'None')
    # quote_strings toggles the u'...' wrapper
    self.assertEquals(pp_t(u('\u05d0'), quote_strings=True),
                      u("u'\u05d0'"))
    self.assertEquals(pp_t(u('\u05d0'), quote_strings=False),
                      u('\u05d0'))
    # tuples, nested tuples, and mixed str/unicode members
    self.assertEquals(pp_t((u('\u05d0'),
                            u('\u05d1')), quote_strings=True),
                      u("(u'\u05d0', u'\u05d1')"))
    self.assertEquals(pp_t((u('\u05d0'), (u('\u05d1'),
                                          u('\u05d2'))),
                           quote_strings=True),
                      u("(u'\u05d0', (u'\u05d1', u'\u05d2'))"))
    self.assertEquals(pp_t(('foo', u('\u05d0'), (u('\u05d0'),
                                                 u('\u05d0'))),
                           quote_strings=True),
                      u("(u'foo', u'\u05d0', (u'\u05d0', u'\u05d0'))"))
    # escape embedded tabs in string
    # GH #2038
    self.assertTrue(not "\t" in pp_t("a\tb", escape_chars=("\t",)))
def test_wide_repr(self):
    """expand_frame_repr toggles between truncated and full wide repr."""
    with option_context('mode.sim_interactive', True):
        col = lambda l, k: [tm.rands(k) for _ in range(l)]
        max_cols = get_option('display.max_columns')
        df = DataFrame([col(max_cols - 1, 25) for _ in range(10)])
        # truncated repr reports its dimensions
        set_option('display.expand_frame_repr', False)
        rep_str = repr(df)
        assert "10 rows x %d columns" % (max_cols - 1) in rep_str
        set_option('display.expand_frame_repr', True)
        wide_repr = repr(df)
        self.assert_(rep_str != wide_repr)
        # a wider terminal produces a more compact wide repr
        with option_context('display.width', 120):
            wider_repr = repr(df)
            self.assert_(len(wider_repr) < len(wide_repr))
        reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
    """Very wide column labels still produce a fixed-height repr."""
    with option_context('mode.sim_interactive', True):
        frame = DataFrame(randn(5, 3),
                          columns=['a' * 90, 'b' * 90, 'c' * 90])
        self.assertEqual(len(repr(frame).splitlines()), 22)
def test_wide_repr_named(self):
    """A named index must show its name in every wide-repr chunk."""
    with option_context('mode.sim_interactive', True):
        col = lambda l, k: [tm.rands(k) for _ in range(l)]
        max_cols = get_option('display.max_columns')
        df = DataFrame([col(max_cols-1, 25) for _ in range(10)])
        df.index.name = 'DataFrame Index'
        set_option('display.expand_frame_repr', False)
        rep_str = repr(df)
        set_option('display.expand_frame_repr', True)
        wide_repr = repr(df)
        self.assert_(rep_str != wide_repr)
        with option_context('display.width', 150):
            wider_repr = repr(df)
            self.assert_(len(wider_repr) < len(wide_repr))
        # every chunk header (every 13th line after the first)
        # repeats the index name
        for line in wide_repr.splitlines()[1::13]:
            self.assert_('DataFrame Index' in line)
        reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
    """MultiIndex level names must appear in each wide-repr chunk."""
    with option_context('mode.sim_interactive', True):
        col = lambda l, k: [tm.rands(k) for _ in range(l)]
        midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
                                              np.array(col(10, 5))])
        max_cols = get_option('display.max_columns')
        df = DataFrame([col(max_cols-1, 25) for _ in range(10)],
                       index=midx)
        df.index.names = ['Level 0', 'Level 1']
        set_option('display.expand_frame_repr', False)
        rep_str = repr(df)
        set_option('display.expand_frame_repr', True)
        wide_repr = repr(df)
        self.assert_(rep_str != wide_repr)
        with option_context('display.width', 150):
            wider_repr = repr(df)
            self.assert_(len(wider_repr) < len(wide_repr))
        # chunk headers repeat both level names
        for line in wide_repr.splitlines()[1::13]:
            self.assert_('Level 0 Level 1' in line)
        reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
    """Wide repr with MultiIndex on both axes still expands/truncates."""
    with option_context('mode.sim_interactive', True):
        max_cols = get_option('display.max_columns')
        col = lambda l, k: [tm.rands(k) for _ in range(l)]
        midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
                                              np.array(col(10, 5))])
        mcols = pandas.MultiIndex.from_arrays([np.array(col(max_cols-1, 3)),
                                               np.array(col(max_cols-1, 3))])
        df = DataFrame([col(max_cols-1, 25) for _ in range(10)],
                       index=midx, columns=mcols)
        df.index.names = ['Level 0', 'Level 1']
        set_option('display.expand_frame_repr', False)
        rep_str = repr(df)
        set_option('display.expand_frame_repr', True)
        wide_repr = repr(df)
        self.assert_(rep_str != wide_repr)
        with option_context('display.width', 150):
            wider_repr = repr(df)
            self.assert_(len(wider_repr) < len(wide_repr))
        reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
    """Wide repr works with unicode cell contents (tm.randu)."""
    with option_context('mode.sim_interactive', True):
        col = lambda l, k: [tm.randu(k) for _ in range(l)]
        max_cols = get_option('display.max_columns')
        df = DataFrame([col(max_cols-1, 25) for _ in range(10)])
        set_option('display.expand_frame_repr', False)
        rep_str = repr(df)
        set_option('display.expand_frame_repr', True)
        wide_repr = repr(df)
        self.assert_(rep_str != wide_repr)
        with option_context('display.width', 150):
            wider_repr = repr(df)
            self.assert_(len(wider_repr) < len(wide_repr))
        reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
    """Long cell values must survive wide-repr rendering intact."""
    with option_context('mode.sim_interactive', True):
        frame = DataFrame({'a': ['a' * 30, 'b' * 30],
                           'b': ['c' * 70, 'd' * 80]})
        rendered = repr(frame)
        for fragment in ('ccccc', 'ddddd'):
            self.assertTrue(fragment in rendered)
def test_long_series(self):
    """'dtype' must appear exactly once in a long Series repr."""
    import re
    n = 1000
    s = Series(np.random.randint(-50, 50, n),
               index=['s%04d' % x for x in range(n)], dtype='int64')
    str_rep = str(s)
    nmatches = len(re.findall('dtype', str_rep))
    # assertEqual reports the actual count on failure, unlike the
    # deprecated truthiness-only self.assert_ used before.
    self.assertEqual(nmatches, 1)
def test_index_with_nan(self):
    # GH 2850: NaN entries in a (Multi)Index must render as 'NaN',
    # and sparsified repeats as blanks.
    # NOTE(review): expected strings appear to have lost alignment
    # spacing in transit -- verify against actual to_string output.
    df = DataFrame({'id1': {0: '1a3', 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
                    'id3': {0: '78d', 1: '79d'}, 'value': {0: 123, 1: 64}})
    # multi-index
    y = df.set_index(['id1', 'id2', 'id3'])
    result = y.to_string()
    expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
    self.assertEqual(result, expected)
    # index
    y = df.set_index('id2')
    result = y.to_string()
    expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nd67 9h4 79d 64')
    self.assertEqual(result, expected)
    # with append (this failed in 0.12)
    y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
    result = y.to_string()
    expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
    self.assertEqual(result, expected)
    # all-nan in mi
    df2 = df.copy()
    df2.ix[:,'id2'] = np.nan
    y = df2.set_index('id2')
    result = y.to_string()
    expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64')
    self.assertEqual(result, expected)
    # partial nan in mi
    df2 = df.copy()
    df2.ix[:,'id2'] = np.nan
    y = df2.set_index(['id2','id3'])
    result = y.to_string()
    expected = u(' id1 value\nid2 id3 \nNaN 78d 1a3 123\n 79d 9h4 64')
    self.assertEqual(result, expected)
    # nan in every index level
    df = DataFrame({'id1': {0: np.nan, 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
                    'id3': {0: np.nan, 1: '79d'}, 'value': {0: 123, 1: 64}})
    y = df.set_index(['id1','id2','id3'])
    result = y.to_string()
    expected = u(' value\nid1 id2 id3 \nNaN NaN NaN 123\n9h4 d67 79d 64')
    self.assertEqual(result, expected)
def test_to_string(self):
    """to_string: buffer round-trip, column ordering, and formatter
    smoke tests on a big mixed frame."""
    from pandas import read_table
    import re
    # big mixed
    biggie = DataFrame({'A': randn(200),
                        'B': tm.makeStringIndex(200)},
                       index=lrange(200))
    biggie['A'][:20] = nan
    biggie['B'][:20] = nan
    s = biggie.to_string()
    # writing to a buffer returns None and yields identical text
    buf = StringIO()
    retval = biggie.to_string(buf=buf)
    self.assert_(retval is None)
    self.assertEqual(buf.getvalue(), s)
    tm.assert_isinstance(s, compat.string_types)
    # print in right order
    result = biggie.to_string(columns=['B', 'A'], col_space=17,
                              float_format='%.5f'.__mod__)
    lines = result.split('\n')
    header = lines[0].strip().split()
    # re-parse the rendered text and compare against the source frame
    joined = '\n'.join([re.sub('\s+', ' ', x).strip() for x in lines[1:]])
    recons = read_table(StringIO(joined), names=header,
                        header=None, sep=' ')
    tm.assert_series_equal(recons['B'], biggie['B'])
    self.assertEqual(recons['A'].count(), biggie['A'].count())
    self.assert_((np.abs(recons['A'].dropna() -
                         biggie['A'].dropna()) < 0.1).all())
    # expected = ['B', 'A']
    # self.assertEqual(header, expected)
    result = biggie.to_string(columns=['A'], col_space=17)
    header = result.split('\n')[0].strip().split()
    expected = ['A']
    self.assertEqual(header, expected)
    # smoke tests for formatter / float_format / col_space combinations
    biggie.to_string(columns=['B', 'A'],
                     formatters={'A': lambda x: '%.1f' % x})
    biggie.to_string(columns=['B', 'A'], float_format=str)
    biggie.to_string(columns=['B', 'A'], col_space=12,
                     float_format=str)
    # empty (index-only) frame
    frame = DataFrame(index=np.arange(200))
    frame.to_string()
def test_to_string_no_header(self):
    """header=False suppresses the column header row."""
    df = DataFrame({'x': [1, 2, 3],
                    'y': [4, 5, 6]})
    df_s = df.to_string(header=False)
    expected = "0 1 4\n1 2 5\n2 3 6"
    # self.assertEqual shows a diff on failure; the bare assert()
    # used before is also silently stripped under `python -O`.
    self.assertEqual(df_s, expected)
def test_to_string_no_index(self):
    """index=False suppresses the index column."""
    df = DataFrame({'x': [1, 2, 3],
                    'y': [4, 5, 6]})
    df_s = df.to_string(index=False)
    expected = " x y\n 1 4\n 2 5\n 3 6"
    # self.assertEqual shows a diff on failure; the bare assert()
    # used before is also silently stripped under `python -O`.
    self.assertEqual(df_s, expected)
def test_to_string_float_formatting(self):
    """Default float formatting: scientific notation for extreme
    magnitudes, fixed-point otherwise; exponent width is platform
    dependent (_three_digit_exp)."""
    fmt.reset_option('^display.')
    fmt.set_option('display.precision', 6, 'display.column_space',
                   12, 'display.notebook_repr_html', False)
    df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
                          1.7e+8, 1.253456, np.pi, -1e6]})
    df_s = df.to_string()
    # Python 2.5 just wants me to be sad. And debian 32-bit
    # sys.version_info[0] == 2 and sys.version_info[1] < 6:
    if _three_digit_exp():
        expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
                    '2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
                    '5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
                    '8 -1.00000e+006')
    else:
        expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
                    '2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
                    '5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
                    '8 -1.00000e+06')
    assert(df_s == expected)
    # moderate values use fixed-point
    df = DataFrame({'x': [3234, 0.253]})
    df_s = df.to_string()
    expected = (' x\n'
                '0 3234.000\n'
                '1 0.253')
    assert(df_s == expected)
    # after reset, the default precision applies again
    fmt.reset_option('^display.')
    self.assertEqual(get_option("display.precision"), 7)
    df = DataFrame({'x': [1e9, 0.2512]})
    df_s = df.to_string()
    # Python 2.5 just wants me to be sad. And debian 32-bit
    # sys.version_info[0] == 2 and sys.version_info[1] < 6:
    if _three_digit_exp():
        expected = (' x\n'
                    '0 1.000000e+009\n'
                    '1 2.512000e-001')
    else:
        expected = (' x\n'
                    '0 1.000000e+09\n'
                    '1 2.512000e-01')
    assert(df_s == expected)
def test_to_string_small_float_values(self):
    """Tiny float magnitudes switch to scientific notation; a frame of
    (signed) zeros does not."""
    df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
    result = df.to_string()
    # sadness per above: exponent width is platform dependent
    if '%.4g' % 1.7e8 == '1.7e+008':
        expected = (' a\n'
                    '0 1.500000e+000\n'
                    '1 1.000000e-017\n'
                    '2 -5.500000e-007')
    else:
        expected = (' a\n'
                    '0 1.500000e+00\n'
                    '1 1.000000e-17\n'
                    '2 -5.500000e-07')
    self.assertEqual(result, expected)
    # but not all exactly zero
    df = df * 0
    result = df.to_string()
    expected = (' 0\n'
                '0 0\n'
                '1 0\n'
                '2 -0')
    # BUG FIX: the original built `expected` here but never compared
    # it, so the zeros half of the test asserted nothing.
    self.assertEqual(result, expected)
def test_to_string_float_index(self):
    """Float index labels render with a consistent decimal precision."""
    idx = Index([1.5, 2, 3, 4, 5])
    frame = DataFrame(lrange(5), index=idx)
    expected = (' 0\n'
                '1.5 0\n'
                '2.0 1\n'
                '3.0 2\n'
                '4.0 3\n'
                '5.0 4')
    self.assertEqual(frame.to_string(), expected)
def test_to_string_ascii_error(self):
    """repr of raw non-ASCII byte junk must not raise (smoke test)."""
    data = [('0 ',
             u(' .gitignore '),
             u(' 5 '),
             ' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
             '\xa2\xe2\x80\xa2\xe2\x80\xa2')]
    df = DataFrame(data)
    # it works!
    repr(df)
def test_to_string_int_formatting(self):
    """Integer columns render right-aligned with no decimal point."""
    df = DataFrame({'x': [-15, 20, 25, -35]})
    # assertTrue replaces the deprecated self.assert_ alias
    self.assertTrue(issubclass(df['x'].dtype.type, np.integer))
    output = df.to_string()
    expected = (' x\n'
                '0 -15\n'
                '1 20\n'
                '2 25\n'
                '3 -35')
    self.assertEqual(output, expected)
def test_to_string_index_formatter(self):
    """'__index__' formatter is applied when rendering with to_string."""
    df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])
    rs = df.to_string(formatters={'__index__': lambda x: 'abc'[x]})
    # trailing backslash joins the last content line with the closing quotes
    xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
    self.assertEqual(rs, xp)
def test_to_string_left_justify_cols(self):
    """justify='left' left-aligns the column header."""
    fmt.reset_option('^display.')
    df = DataFrame({'x': [3234, 0.253]})
    df_s = df.to_string(justify='left')
    expected = (' x \n'
                '0 3234.000\n'
                '1 0.253')
    # assertEqual survives `python -O` and reports a diff on failure,
    # unlike the bare assert() used before.
    self.assertEqual(df_s, expected)
def test_to_string_format_na(self):
    """NaN renders as 'NaN' in both float and object columns."""
    fmt.reset_option('^display.')
    df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
                    'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
    result = df.to_string()
    expected = (' A B\n'
                '0 NaN NaN\n'
                '1 -1.0000 foo\n'
                '2 -2.1234 foooo\n'
                '3 3.0000 fooooo\n'
                '4 4.0000 bar')
    self.assertEqual(result, expected)
    # whole floats drop the decimal part entirely
    df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
                    'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
    result = df.to_string()
    expected = (' A B\n'
                '0 NaN NaN\n'
                '1 -1 foo\n'
                '2 -2 foooo\n'
                '3 3 fooooo\n'
                '4 4 bar')
    self.assertEqual(result, expected)
def test_to_string_line_width(self):
    """line_width caps the rendered width of every output line."""
    frame = pd.DataFrame(123, lrange(10, 15), lrange(30))
    rendered = frame.to_string(line_width=80)
    widths = [len(line) for line in rendered.split('\n')]
    self.assertEqual(max(widths), 80)
def test_to_html(self):
    """to_html: buffer round-trip and formatter smoke tests on a big
    mixed frame."""
    # big mixed
    biggie = DataFrame({'A': randn(200),
                        'B': tm.makeStringIndex(200)},
                       index=lrange(200))
    biggie['A'][:20] = nan
    biggie['B'][:20] = nan
    s = biggie.to_html()
    # writing to a buffer returns None and yields identical text
    buf = StringIO()
    retval = biggie.to_html(buf=buf)
    self.assert_(retval is None)
    self.assertEqual(buf.getvalue(), s)
    tm.assert_isinstance(s, compat.string_types)
    # smoke tests for formatter / float_format / col_space combinations
    biggie.to_html(columns=['B', 'A'], col_space=17)
    biggie.to_html(columns=['B', 'A'],
                   formatters={'A': lambda x: '%.1f' % x})
    biggie.to_html(columns=['B', 'A'], float_format=str)
    biggie.to_html(columns=['B', 'A'], col_space=12,
                   float_format=str)
    # empty (index-only) frame
    frame = DataFrame(index=np.arange(200))
    frame.to_html()
def test_to_html_filename(self):
    """to_html(path) writes exactly what to_html() returns."""
    biggie = DataFrame({'A': randn(200),
                        'B': tm.makeStringIndex(200)},
                       index=lrange(200))
    biggie['A'][:20] = nan
    biggie['B'][:20] = nan
    with tm.ensure_clean('test.html') as path:
        biggie.to_html(path)
        with open(path, 'r') as f:
            s = biggie.to_html()
            s2 = f.read()
            self.assertEqual(s, s2)
    # same check for an empty (index-only) frame
    frame = DataFrame(index=np.arange(200))
    with tm.ensure_clean('test.html') as path:
        frame.to_html(path)
        with open(path, 'r') as f:
            self.assertEqual(frame.to_html(), f.read())
def test_to_html_with_no_bold(self):
    """bold_rows=False must not emit <strong> row labels in the body."""
    x = DataFrame({'x': randn(5)})
    ashtml = x.to_html(bold_rows=False)
    # BUG FIX: the original checked `'<strong>' not in
    # ashtml[ashtml.find('</thead>')]` -- indexing yields a SINGLE
    # character, so the containment test was vacuously true.  Slice
    # from the end of the header onward (bold_rows affects the body
    # row labels) so the assertion actually inspects the markup.
    assert('<strong>' not in ashtml[ashtml.find('</thead>'):])
def test_to_html_columns_arg(self):
    """columns= restricts which columns are rendered."""
    rendered = self.frame.to_html(columns=['A'])
    self.assertTrue('<th>B</th>' not in rendered)
def test_to_html_multiindex(self):
    """MultiIndex columns render as stacked header rows, with colspan
    when level names are present."""
    columns = pandas.MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2),
                                                     np.mod(lrange(4), 2))),
                                            names=['CL0', 'CL1'])
    df = pandas.DataFrame([list('abcd'), list('efgh')], columns=columns)
    result = df.to_html(justify='left')
    # NOTE(review): expected markup appears to have lost indentation
    # in transit -- verify against actual to_html output.
    expected = ('<table border="1" class="dataframe">\n'
                ' <thead>\n'
                ' <tr>\n'
                ' <th>CL0</th>\n'
                ' <th colspan="2" halign="left">0</th>\n'
                ' <th colspan="2" halign="left">1</th>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>CL1</th>\n'
                ' <th>0</th>\n'
                ' <th>1</th>\n'
                ' <th>0</th>\n'
                ' <th>1</th>\n'
                ' </tr>\n'
                ' </thead>\n'
                ' <tbody>\n'
                ' <tr>\n'
                ' <th>0</th>\n'
                ' <td> a</td>\n'
                ' <td> b</td>\n'
                ' <td> c</td>\n'
                ' <td> d</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>1</th>\n'
                ' <td> e</td>\n'
                ' <td> f</td>\n'
                ' <td> g</td>\n'
                ' <td> h</td>\n'
                ' </tr>\n'
                ' </tbody>\n'
                '</table>')
    self.assertEqual(result, expected)
    # unnamed levels: no colspan merging, one header row per level
    columns = pandas.MultiIndex.from_tuples(list(zip(range(4),
                                                     np.mod(lrange(4), 2))))
    df = pandas.DataFrame([list('abcd'), list('efgh')], columns=columns)
    result = df.to_html(justify='right')
    expected = ('<table border="1" class="dataframe">\n'
                ' <thead>\n'
                ' <tr>\n'
                ' <th></th>\n'
                ' <th>0</th>\n'
                ' <th>1</th>\n'
                ' <th>2</th>\n'
                ' <th>3</th>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th></th>\n'
                ' <th>0</th>\n'
                ' <th>1</th>\n'
                ' <th>0</th>\n'
                ' <th>1</th>\n'
                ' </tr>\n'
                ' </thead>\n'
                ' <tbody>\n'
                ' <tr>\n'
                ' <th>0</th>\n'
                ' <td> a</td>\n'
                ' <td> b</td>\n'
                ' <td> c</td>\n'
                ' <td> d</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>1</th>\n'
                ' <td> e</td>\n'
                ' <td> f</td>\n'
                ' <td> g</td>\n'
                ' <td> h</td>\n'
                ' </tr>\n'
                ' </tbody>\n'
                '</table>')
    self.assertEqual(result, expected)
def test_to_html_justify(self):
    """justify= controls the text-align style on the header row."""
    df = pandas.DataFrame({'A': [6, 30000, 2],
                           'B': [1, 2, 70000],
                           'C': [223442, 0, 1]},
                          columns=['A', 'B', 'C'])
    result = df.to_html(justify='left')
    expected = ('<table border="1" class="dataframe">\n'
                ' <thead>\n'
                ' <tr style="text-align: left;">\n'
                ' <th></th>\n'
                ' <th>A</th>\n'
                ' <th>B</th>\n'
                ' <th>C</th>\n'
                ' </tr>\n'
                ' </thead>\n'
                ' <tbody>\n'
                ' <tr>\n'
                ' <th>0</th>\n'
                ' <td> 6</td>\n'
                ' <td> 1</td>\n'
                ' <td> 223442</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>1</th>\n'
                ' <td> 30000</td>\n'
                ' <td> 2</td>\n'
                ' <td> 0</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>2</th>\n'
                ' <td> 2</td>\n'
                ' <td> 70000</td>\n'
                ' <td> 1</td>\n'
                ' </tr>\n'
                ' </tbody>\n'
                '</table>')
    self.assertEqual(result, expected)
    # only the header's text-align differs between justify values
    result = df.to_html(justify='right')
    expected = ('<table border="1" class="dataframe">\n'
                ' <thead>\n'
                ' <tr style="text-align: right;">\n'
                ' <th></th>\n'
                ' <th>A</th>\n'
                ' <th>B</th>\n'
                ' <th>C</th>\n'
                ' </tr>\n'
                ' </thead>\n'
                ' <tbody>\n'
                ' <tr>\n'
                ' <th>0</th>\n'
                ' <td> 6</td>\n'
                ' <td> 1</td>\n'
                ' <td> 223442</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>1</th>\n'
                ' <td> 30000</td>\n'
                ' <td> 2</td>\n'
                ' <td> 0</td>\n'
                ' </tr>\n'
                ' <tr>\n'
                ' <th>2</th>\n'
                ' <td> 2</td>\n'
                ' <td> 70000</td>\n'
                ' <td> 1</td>\n'
                ' </tr>\n'
                ' </tbody>\n'
                '</table>')
    self.assertEqual(result, expected)
def test_to_html_index(self):
    """index=False omits index labels for flat and MultiIndex frames."""
    index = ['foo', 'bar', 'baz']
    df = pandas.DataFrame({'A': [1, 2, 3],
                           'B': [1.2, 3.4, 5.6],
                           'C': ['one', 'two', np.NaN]},
                          columns=['A', 'B', 'C'],
                          index=index)
    result = df.to_html(index=False)
    for i in index:
        self.assert_(i not in result)
    # MultiIndex labels must be omitted as well
    tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
    df.index = pandas.MultiIndex.from_tuples(tuples)
    result = df.to_html(index=False)
    for i in ['foo', 'bar', 'car', 'bike']:
        self.assert_(i not in result)
def test_repr_html(self):
    """_repr_html_ honours max_rows/max_columns, notebook_repr_html
    and show_dimensions options (smoke + dimension-line checks)."""
    self.frame._repr_html_()
    fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
    self.frame._repr_html_()
    fmt.set_option('display.notebook_repr_html', False)
    self.frame._repr_html_()
    fmt.reset_option('^display.')
    # the "N rows" dimension line follows display.show_dimensions
    df = DataFrame([[1, 2], [3, 4]])
    self.assertTrue('2 rows' in df._repr_html_())
    fmt.set_option('display.show_dimensions', False)
    self.assertFalse('2 rows' in df._repr_html_())
    fmt.reset_option('^display.')
def test_repr_html_wide(self):
    """The '...' truncation marker appears only once the frame is
    wider than display.max_columns."""
    make_row = lambda l, k: [tm.rands(k) for _ in range(l)]
    max_cols = get_option('display.max_columns')
    narrow = DataFrame([make_row(max_cols - 1, 25) for _ in range(10)])
    assert "..." not in narrow._repr_html_()
    wide = DataFrame([make_row(max_cols + 1, 25) for _ in range(10)])
    assert "..." in wide._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
    """Truncation marker appears once MultiIndex columns exceed
    display.max_columns."""
    row = lambda l, k: [tm.rands(k) for _ in range(l)]
    max_cols = get_option('display.max_columns')
    # exactly at the limit: no truncation
    tuples = list(itertools.product(np.arange(max_cols//2), ['foo', 'bar']))
    mcols = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    df = DataFrame([row(len(mcols), 25) for _ in range(10)], columns=mcols)
    reg_repr = df._repr_html_()
    assert '...' not in reg_repr
    # one pair past the limit: truncated
    tuples = list(itertools.product(np.arange(1+(max_cols//2)), ['foo', 'bar']))
    mcols = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    df = DataFrame([row(len(mcols), 25) for _ in range(10)], columns=mcols)
    wide_repr = df._repr_html_()
    assert '...' in wide_repr
def test_repr_html_long(self):
    """Vertical truncation kicks in just past display.max_rows; the
    truncated repr drops the last value and reports dimensions."""
    max_rows = get_option('display.max_rows')
    h = max_rows - 1
    df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
    reg_repr = df._repr_html_()
    assert '...' not in reg_repr
    # last B value (40 + h) is still visible when not truncated
    assert str(40 + h) in reg_repr
    h = max_rows + 1
    df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
    long_repr = df._repr_html_()
    assert '...' in long_repr
    assert str(40 + h) not in long_repr
    assert u('%d rows ') % h in long_repr
    assert u('2 columns') in long_repr
def test_repr_html_float(self):
    """Same truncation behaviour with a float index."""
    max_rows = get_option('display.max_rows')
    h = max_rows - 1
    df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx')
    reg_repr = df._repr_html_()
    assert '...' not in reg_repr
    assert str(40 + h) in reg_repr
    h = max_rows + 1
    df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx')
    long_repr = df._repr_html_()
    assert '...' in long_repr
    assert str(40 + h) not in long_repr
    assert u('%d rows ') % h in long_repr
    assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
    """Row truncation also applies to frames with a MultiIndex."""
    max_rows = get_option('display.max_rows')
    max_L1 = max_rows//2
    # 2 rows per outer level; max_L1 levels keeps us at the limit
    tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
    idx = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    df = DataFrame(np.random.randn(max_L1*2, 2), index=idx,
                   columns=['A', 'B'])
    reg_repr = df._repr_html_()
    assert '...' not in reg_repr
    # one extra outer level pushes past max_rows -> truncated
    tuples = list(itertools.product(np.arange(max_L1+1), ['foo', 'bar']))
    idx = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    df = DataFrame(np.random.randn((max_L1+1)*2, 2), index=idx,
                   columns=['A', 'B'])
    long_repr = df._repr_html_()
    assert '...' in long_repr
def test_repr_html_long_and_wide(self):
    """'...' appears only when the frame exceeds the display limits."""
    max_cols = get_option('display.max_columns')
    max_rows = get_option('display.max_rows')

    def frame_of(height, width):
        # integer-keyed columns, each a 1..height ramp
        return pandas.DataFrame(
            dict((k, np.arange(1, 1 + height)) for k in np.arange(width)))

    assert '...' not in frame_of(max_rows - 1, max_cols - 1)._repr_html_()
    assert '...' in frame_of(max_rows + 1, max_cols + 1)._repr_html_()
def test_info_repr(self):
    """display.large_repr='info' swaps the truncated repr for an
    info()-style summary."""
    max_rows = get_option('display.max_rows')
    max_cols = get_option('display.max_columns')
    # Long
    h, w = max_rows+1, max_cols-1
    df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
    assert has_vertically_truncated_repr(df)
    with option_context('display.large_repr', 'info'):
        assert has_info_repr(df)
    # Wide
    h, w = max_rows-1, max_cols+1
    df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
    # NOTE(review): this checks *vertical* truncation for the wide
    # case -- a horizontal-truncation helper seems intended; confirm.
    assert has_vertically_truncated_repr(df)
    with option_context('display.large_repr', 'info'):
        assert has_info_repr(df)
def test_info_repr_html(self):
    """large_repr='info' affects the HTML repr too ('<class' marks the
    info view)."""
    max_rows = get_option('display.max_rows')
    max_cols = get_option('display.max_columns')
    # Long
    h, w = max_rows+1, max_cols-1
    df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
    assert '<class' not in df._repr_html_()
    with option_context('display.large_repr', 'info'):
        assert '<class' in df._repr_html_()
    # Wide
    h, w = max_rows-1, max_cols+1
    df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
    assert '<class' not in df._repr_html_()
    with option_context('display.large_repr', 'info'):
        assert '<class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self):
    """Simulated qtconsole: HTML repr must fall back to the info view
    once the display limits are tiny."""
    def get_ipython():
        return {'config':
                {'KernelApp':
                 {'parent_appname': 'ipython-qtconsole'}}}
    # NOTE(review): the fake get_ipython above is defined but never
    # injected anywhere visible here -- confirm it is actually
    # picked up by the repr machinery.
    repstr = self.frame._repr_html_()
    self.assert_(repstr is not None)
    fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
    repstr = self.frame._repr_html_()
    self.assert_('class' in repstr)  # info fallback
    fmt.reset_option('^display.')
def test_to_html_with_classes(self):
    """Extra CSS classes (string or list) are appended to the table tag."""
    df = pandas.DataFrame()
    result = df.to_html(classes="sortable draggable")
    expected = dedent("""
<table border="1" class="dataframe sortable draggable">
<tbody>
<tr>
<td>Index([], dtype='object')</td>
<td>Empty DataFrame</td>
</tr>
</tbody>
</table>
""").strip()
    self.assertEqual(result, expected)
    # a list of classes produces the same markup
    result = df.to_html(classes=["sortable", "draggable"])
    self.assertEqual(result, expected)
def test_pprint_pathological_object(self):
    """
    if the test fails, the stack will overflow and nose crash,
    but it won't hang.
    """
    class A:
        # minimal object whose __getitem__ always succeeds -- this
        # shape used to send pprint into unbounded recursion
        def __getitem__(self, key):
            return 3  # obviously simplified
    df = pandas.DataFrame([A()])
    repr(df)  # just don't die
def test_float_trim_zeros(self):
    """Uniform-magnitude large floats keep scientific notation with a
    full-width exponent in the frame repr."""
    vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
            2.03954217305e+10, 5.59897817305e+10]
    # `skip` lets the first (header) line through unchecked
    skip = True
    for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
        if line.startswith('dtype:'):
            continue
        if _three_digit_exp():
            self.assert_(('+010' in line) or skip)
        else:
            self.assert_(('+10' in line) or skip)
        skip = False
def test_dict_entries(self):
    """dict cell values are rendered with their items visible."""
    df = DataFrame({'A': [{'a': 1, 'b': 2}]})
    rendered = df.to_string()
    for fragment in ("'a': 1", "'b': 2"):
        self.assertTrue(fragment in rendered)
def test_to_latex_filename(self):
    """to_latex(path) writes exactly what to_latex() returns."""
    with tm.ensure_clean('test.tex') as path:
        self.frame.to_latex(path)
        with open(path, 'r') as f:
            self.assertEqual(self.frame.to_latex(), f.read())
def test_to_latex(self):
    """Basic to_latex output, with and without the index column."""
    # it works!
    self.frame.to_latex()
    df = DataFrame({'a': [1, 2],
                    'b': ['b1', 'b2']})
    withindex_result = df.to_latex()
    withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
    self.assertEqual(withindex_result, withindex_expected)
    # index=False drops the leading 'l' column spec and index labels
    withoutindex_result = df.to_latex(index=False)
    withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
    self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_latex_escape_special_chars(self):
    """Each LaTeX special character is escaped in the rendered output."""
    special_characters = ['&','%','$','#','_',
                          '{','}','~','^','\\']
    df = DataFrame(data=special_characters)
    observed = df.to_latex()
    expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
    self.assertEqual(observed, expected)
class TestSeriesFormatting(tm.TestCase):
    """Rendering behaviour of Series: to_string, unicode handling,
    NaN spacing, and datetime/timedelta formatting."""

    _multiprocess_can_split_ = True

    def setUp(self):
        # small business-day time series shared by the to_string tests
        self.ts = tm.makeTimeSeries()

    def test_repr_unicode(self):
        """repr of unicode values and a unicode name must not raise."""
        s = Series([u('\u03c3')] * 10)
        repr(s)
        a = Series([u("\u05d0")] * 1000)
        a.name = 'title1'
        repr(a)

    def test_to_string(self):
        """Buffer round-trip, float_format, empty series, and footer."""
        buf = StringIO()
        s = self.ts.to_string()
        retval = self.ts.to_string(buf=buf)
        self.assert_(retval is None)
        self.assertEqual(buf.getvalue().strip(), s)
        # pass float_format
        format = '%.4f'.__mod__
        result = self.ts.to_string(float_format=format)
        result = [x.split()[1] for x in result.split('\n')]
        expected = [format(x) for x in self.ts]
        self.assertEqual(result, expected)
        # empty string
        result = self.ts[:0].to_string()
        self.assertEqual(result, '')
        result = self.ts[:0].to_string(length=0)
        self.assertEqual(result, '')
        # name and length
        cp = self.ts.copy()
        cp.name = 'foo'
        result = cp.to_string(length=True, name=True, dtype=True)
        last_line = result.split('\n')[-1].strip()
        self.assertEqual(last_line, "Freq: B, Name: foo, Length: %d, dtype: float64" % len(cp))

    def test_freq_name_separation(self):
        """Freq and Name are comma-separated in the repr footer."""
        s = Series(np.random.randn(10),
                   index=pd.date_range('1/1/2000', periods=10), name=0)
        result = repr(s)
        self.assertTrue('Freq: D, Name: 0' in result)

    def test_to_string_mixed(self):
        """Mixed object/float columns: NaN renders as NaN, floats keep
        their formatting, plain object values are untouched."""
        s = Series(['foo', np.nan, -1.23, 4.56])
        result = s.to_string()
        expected = (u('0 foo\n') +
                    u('1 NaN\n') +
                    u('2 -1.23\n') +
                    u('3 4.56'))
        self.assertEqual(result, expected)
        # but don't count NAs as floats
        s = Series(['foo', np.nan, 'bar', 'baz'])
        result = s.to_string()
        expected = (u('0 foo\n') +
                    '1 NaN\n' +
                    '2 bar\n' +
                    '3 baz')
        self.assertEqual(result, expected)
        s = Series(['foo', 5, 'bar', 'baz'])
        result = s.to_string()
        expected = (u('0 foo\n') +
                    '1 5\n' +
                    '2 bar\n' +
                    '3 baz')
        self.assertEqual(result, expected)

    def test_to_string_float_na_spacing(self):
        """NaNs in a float series align with the formatted floats."""
        s = Series([0., 1.5678, 2., -3., 4.])
        s[::2] = np.nan
        result = s.to_string()
        expected = (u('0 NaN\n') +
                    '1 1.5678\n' +
                    '2 NaN\n' +
                    '3 -3.0000\n' +
                    '4 NaN')
        self.assertEqual(result, expected)

    def test_unicode_name_in_footer(self):
        """Footer construction must cope with a unicode series name."""
        s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
        sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
        sf._get_footer()  # should not raise exception

    def test_float_trim_zeros(self):
        """Uniform-magnitude large floats keep a full-width exponent."""
        vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
                2.03954217305e+10, 5.59897817305e+10]
        for line in repr(Series(vals)).split('\n'):
            if line.startswith('dtype:'):
                continue
            if _three_digit_exp():
                self.assert_('+010' in line)
            else:
                self.assert_('+10' in line)

    def test_datetimeindex(self):
        """Datetime values and NaT render correctly in repr/to_string."""
        from pandas import date_range, NaT
        index = date_range('20130102',periods=6)
        s = Series(1,index=index)
        result = s.to_string()
        self.assertTrue('2013-01-02' in result)
        # nat in index
        s2 = Series(2, index=[ Timestamp('20130111'), NaT ])
        s = s2.append(s)
        result = s.to_string()
        self.assertTrue('NaT' in result)
        # nat in summary
        result = str(s2.index)
        self.assertTrue('NaT' in result)

    def test_timedelta64(self):
        """timedelta64 rendering: day counts, NaT, fractional seconds,
        negative deltas, and datetime +/- timedelta arithmetic."""
        from pandas import date_range
        from datetime import datetime, timedelta
        Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
        s = Series(date_range('2012-1-1', periods=3, freq='D'))
        # GH2146
        # adding NaTs
        y = s-s.shift(1)
        result = y.to_string()
        self.assertTrue('1 days' in result)
        self.assertTrue('00:00:00' not in result)
        self.assertTrue('NaT' in result)
        # with frac seconds
        o = Series([datetime(2012,1,1,microsecond=150)]*3)
        y = s-o
        result = y.to_string()
        self.assertTrue('-0 days, 00:00:00.000150' in result)
        # rounding?
        o = Series([datetime(2012,1,1,1)]*3)
        y = s-o
        result = y.to_string()
        self.assertTrue('-0 days, 01:00:00' in result)
        self.assertTrue('1 days, 23:00:00' in result)
        o = Series([datetime(2012,1,1,1,1)]*3)
        y = s-o
        result = y.to_string()
        self.assertTrue('-0 days, 01:01:00' in result)
        self.assertTrue('1 days, 22:59:00' in result)
        o = Series([datetime(2012,1,1,1,1,microsecond=150)]*3)
        y = s-o
        result = y.to_string()
        self.assertTrue('-0 days, 01:01:00.000150' in result)
        self.assertTrue('1 days, 22:58:59.999850' in result)
        # neg time
        td = timedelta(minutes=5,seconds=3)
        s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
        y = s - s2
        result = y.to_string()
        self.assertTrue('-00:05:03' in result)
        td = timedelta(microseconds=550)
        s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
        y = s - td
        result = y.to_string()
        self.assertTrue('2012-01-01 23:59:59.999450' in result)

    def test_mixed_datetime64(self):
        """A row repr with mixed int/datetime64 columns shows the date."""
        df = DataFrame({'A': [1, 2],
                        'B': ['2012-01-01', '2012-01-02']})
        df['B'] = pd.to_datetime(df.B)
        result = repr(df.ix[0])
        self.assertTrue('2012-01-01' in result)
class TestEngFormatter(tm.TestCase):
_multiprocess_can_split_ = True
def test_eng_float_formatter(self):
df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
fmt.set_eng_float_format()
result = df.to_string()
expected = (' A\n'
'0 1.410E+00\n'
'1 141.000E+00\n'
'2 14.100E+03\n'
'3 1.410E+06')
self.assertEqual(result, expected)
fmt.set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = (' A\n'
'0 1.410\n'
'1 141.000\n'
'2 14.100k\n'
'3 1.410M')
self.assertEqual(result, expected)
fmt.set_eng_float_format(accuracy=0)
result = df.to_string()
expected = (' A\n'
'0 1E+00\n'
'1 141E+00\n'
'2 14E+03\n'
'3 1E+06')
self.assertEqual(result, expected)
fmt.reset_option('^display.')
def compare(self, formatter, input, output):
formatted_input = formatter(input)
msg = ("formatting of %s results in '%s', expected '%s'"
% (str(input), formatted_input, output))
self.assertEqual(formatted_input, output, msg)
def compare_all(self, formatter, in_out):
"""
Parameters:
-----------
formatter: EngFormatter under test
in_out: list of tuples. Each tuple = (number, expected_formatting)
It is tested if 'formatter(number) == expected_formatting'.
*number* should be >= 0 because formatter(-number) == fmt is also
tested. *fmt* is derived from *expected_formatting*
"""
for input, output in in_out:
self.compare(formatter, input, output)
self.compare(formatter, -input, "-" + output[1:])
def test_exponents_with_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [(f * 10 ** -24, " 1.414y"),
(f * 10 ** -23, " 14.142y"),
(f * 10 ** -22, " 141.421y"),
(f * 10 ** -21, " 1.414z"),
(f * 10 ** -20, " 14.142z"),
(f * 10 ** -19, " 141.421z"),
(f * 10 ** -18, " 1.414a"),
(f * 10 ** -17, " 14.142a"),
(f * 10 ** -16, " 141.421a"),
(f * 10 ** -15, " 1.414f"),
(f * 10 ** -14, " 14.142f"),
(f * 10 ** -13, " 141.421f"),
(f * 10 ** -12, " 1.414p"),
(f * 10 ** -11, " 14.142p"),
(f * 10 ** -10, " 141.421p"),
(f * 10 ** -9, " 1.414n"),
(f * 10 ** -8, " 14.142n"),
(f * 10 ** -7, " 141.421n"),
(f * 10 ** -6, " 1.414u"),
(f * 10 ** -5, " 14.142u"),
(f * 10 ** -4, " 141.421u"),
(f * 10 ** -3, " 1.414m"),
(f * 10 ** -2, " 14.142m"),
(f * 10 ** -1, " 141.421m"),
(f * 10 ** 0, " 1.414"),
(f * 10 ** 1, " 14.142"),
(f * 10 ** 2, " 141.421"),
(f * 10 ** 3, " 1.414k"),
(f * 10 ** 4, " 14.142k"),
(f * 10 ** 5, " 141.421k"),
(f * 10 ** 6, " 1.414M"),
(f * 10 ** 7, " 14.142M"),
(f * 10 ** 8, " 141.421M"),
(f * 10 ** 9, " 1.414G"),
(f * 10 ** 10, " 14.142G"),
(f * 10 ** 11, " 141.421G"),
(f * 10 ** 12, " 1.414T"),
(f * 10 ** 13, " 14.142T"),
(f * 10 ** 14, " 141.421T"),
(f * 10 ** 15, " 1.414P"),
(f * 10 ** 16, " 14.142P"),
(f * 10 ** 17, " 141.421P"),
(f * 10 ** 18, " 1.414E"),
(f * 10 ** 19, " 14.142E"),
(f * 10 ** 20, " 141.421E"),
(f * 10 ** 21, " 1.414Z"),
(f * 10 ** 22, " 14.142Z"),
(f * 10 ** 23, " 141.421Z"),
(f * 10 ** 24, " 1.414Y"),
(f * 10 ** 25, " 14.142Y"),
(f * 10 ** 26, " 141.421Y")]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [(f * 10 ** -24, " 3.1416E-24"),
(f * 10 ** -23, " 31.4159E-24"),
(f * 10 ** -22, " 314.1593E-24"),
(f * 10 ** -21, " 3.1416E-21"),
(f * 10 ** -20, " 31.4159E-21"),
(f * 10 ** -19, " 314.1593E-21"),
(f * 10 ** -18, " 3.1416E-18"),
(f * 10 ** -17, " 31.4159E-18"),
(f * 10 ** -16, " 314.1593E-18"),
(f * 10 ** -15, " 3.1416E-15"),
(f * 10 ** -14, " 31.4159E-15"),
(f * 10 ** -13, " 314.1593E-15"),
(f * 10 ** -12, " 3.1416E-12"),
(f * 10 ** -11, " 31.4159E-12"),
(f * 10 ** -10, " 314.1593E-12"),
(f * 10 ** -9, " 3.1416E-09"),
(f * 10 ** -8, " 31.4159E-09"),
(f * 10 ** -7, " 314.1593E-09"),
(f * 10 ** -6, " 3.1416E-06"),
(f * 10 ** -5, " 31.4159E-06"),
(f * 10 ** -4, " 314.1593E-06"),
(f * 10 ** -3, " 3.1416E-03"),
(f * 10 ** -2, " 31.4159E-03"),
(f * 10 ** -1, " 314.1593E-03"),
(f * 10 ** 0, " 3.1416E+00"),
(f * 10 ** 1, " 31.4159E+00"),
(f * 10 ** 2, " 314.1593E+00"),
(f * 10 ** 3, " 3.1416E+03"),
(f * 10 ** 4, " 31.4159E+03"),
(f * 10 ** 5, " 314.1593E+03"),
(f * 10 ** 6, " 3.1416E+06"),
(f * 10 ** 7, " 31.4159E+06"),
(f * 10 ** 8, " 314.1593E+06"),
(f * 10 ** 9, " 3.1416E+09"),
(f * 10 ** 10, " 31.4159E+09"),
(f * 10 ** 11, " 314.1593E+09"),
(f * 10 ** 12, " 3.1416E+12"),
(f * 10 ** 13, " 31.4159E+12"),
(f * 10 ** 14, " 314.1593E+12"),
(f * 10 ** 15, " 3.1416E+15"),
(f * 10 ** 16, " 31.4159E+15"),
(f * 10 ** 17, " 314.1593E+15"),
(f * 10 ** 18, " 3.1416E+18"),
(f * 10 ** 19, " 31.4159E+18"),
(f * 10 ** 20, " 314.1593E+18"),
(f * 10 ** 21, " 3.1416E+21"),
(f * 10 ** 22, " 31.4159E+21"),
(f * 10 ** 23, " 314.1593E+21"),
(f * 10 ** 24, " 3.1416E+24"),
(f * 10 ** 25, " 31.4159E+24"),
(f * 10 ** 26, " 314.1593E+24")]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [(5.55555, ' 5.556'),
(55.5555, ' 55.556'),
(555.555, ' 555.555'),
(5555.55, ' 5.556k'),
(55555.5, ' 55.556k'),
(555555, ' 555.555k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [(5.55555, ' 5.6'),
(55.5555, ' 55.6'),
(555.555, ' 555.6'),
(5555.55, ' 5.6k'),
(55555.5, ' 55.6k'),
(555555, ' 555.6k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [(5.55555, ' 6'),
(55.5555, ' 56'),
(555.555, ' 556'),
(5555.55, ' 6k'),
(55555.5, ' 56k'),
(555555, ' 556k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
self.assertEqual(result, u(' 0.000'))
def _three_digit_exp():
return '%.4g' % 1.7e8 == '1.7e+008'
class TestFloatArrayFormatter(tm.TestCase):
    """Tests for fmt.FloatArrayFormatter applied to float64 ndarrays."""
    def test_misc(self):
        # formatting an empty array must yield an empty result, not raise
        obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
        result = obj.get_result()
        self.assertTrue(len(result) == 0)
    def test_format(self):
        # integer-valued floats render right-justified without a decimal point
        obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
        result = obj.get_result()
        self.assertEqual(result[0], " 12")
        self.assertEqual(result[1], " 0")
class TestRepr_timedelta64(tm.TestCase):
@classmethod
def setUpClass(cls):
skip_if_np_version_under1p7()
def test_legacy(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
self.assertEqual(tslib.repr_timedelta64(delta_1d), "1 days, 00:00:00")
self.assertEqual(tslib.repr_timedelta64(-delta_1d), "-1 days, 00:00:00")
self.assertEqual(tslib.repr_timedelta64(delta_0d), "00:00:00")
self.assertEqual(tslib.repr_timedelta64(delta_1s), "00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_500ms), "00:00:00.500000")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s), "1 days, 00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms), "1 days, 00:00:00.500000")
def test_short(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
self.assertEqual(tslib.repr_timedelta64(delta_1d, format='short'), "1 days")
self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='short'), "-1 days")
self.assertEqual(tslib.repr_timedelta64(delta_0d, format='short'), "00:00:00")
self.assertEqual(tslib.repr_timedelta64(delta_1s, format='short'), "00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='short'), "00:00:00.500000")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='short'), "1 days, 00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='short'), "1 days, 00:00:00.500000")
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
self.assertEqual(tslib.repr_timedelta64(delta_1d, format='long'), "1 days, 00:00:00")
self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='long'), "-1 days, 00:00:00")
self.assertEqual(tslib.repr_timedelta64(delta_0d, format='long'), "0 days, 00:00:00")
self.assertEqual(tslib.repr_timedelta64(delta_1s, format='long'), "0 days, 00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='long'), "0 days, 00:00:00.500000")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='long'), "1 days, 00:00:01")
self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='long'), "1 days, 00:00:00.500000")
class TestTimedelta64Formatter(tm.TestCase):
@classmethod
def setUpClass(cls):
skip_if_np_version_under1p7()
def test_mixed(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(x + y).get_result()
self.assertEqual(result[0].strip(), "0 days, 00:00:00")
self.assertEqual(result[1].strip(), "1 days, 00:00:01")
def test_mixed_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-(x + y)).get_result()
self.assertEqual(result[0].strip(), "0 days, 00:00:00")
self.assertEqual(result[1].strip(), "-1 days, 00:00:01")
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "0 days")
self.assertEqual(result[1].strip(), "1 days")
result = fmt.Timedelta64Formatter(x[1:2]).get_result()
self.assertEqual(result[0].strip(), "1 days")
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x).get_result()
self.assertEqual(result[0].strip(), "0 days")
self.assertEqual(result[1].strip(), "-1 days")
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y).get_result()
self.assertEqual(result[0].strip(), "00:00:00")
self.assertEqual(result[1].strip(), "00:00:01")
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y).get_result()
self.assertEqual(result[0].strip(), "00:00:00")
self.assertEqual(result[1].strip(), "-00:00:01")
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "0 days")
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "0 days")
class TestDatetime64Formatter(tm.TestCase):
def test_mixed(self):
x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 00:00:00")
self.assertEqual(result[1].strip(), "2013-01-01 12:00:00")
def test_dates(self):
x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01")
self.assertEqual(result[1].strip(), "2013-01-02")
def test_date_nanos(self):
x = pd.Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200")
class TestNaTFormatting(tm.TestCase):
    """Both repr() and str() of pd.NaT must render as the literal 'NaT'."""
    def test_repr(self):
        self.assertEqual(repr(pd.NaT), "NaT")
    def test_str(self):
        self.assertEqual(str(pd.NaT), "NaT")
class TestDatetimeIndexFormat(tm.TestCase):
    """DatetimeIndex.format(): NaT rendering, date vs datetime, tz, custom formats."""
    def test_datetime(self):
        # an index containing any time component formats full timestamps
        formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
        self.assertEqual(formatted[0], "2003-01-01 12:00:00")
        self.assertEqual(formatted[1], "NaT")
    def test_date(self):
        # midnight-only values collapse to date-only strings
        formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
        self.assertEqual(formatted[0], "2003-01-01")
        self.assertEqual(formatted[1], "NaT")
    def test_date_tz(self):
        # tz-aware output carries the UTC offset, with and without NaT present
        formatted = pd.to_datetime([datetime(2013,1,1)], utc=True).format()
        self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
        formatted = pd.to_datetime([datetime(2013,1,1), pd.NaT], utc=True).format()
        self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
    # NOTE(review): 'explict' typo kept -- renaming would change the test id
    def test_date_explict_date_format(self):
        # date_format and na_rep are honoured per element
        formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(date_format="%m-%d-%Y", na_rep="UT")
        self.assertEqual(formatted[0], "02-01-2003")
        self.assertEqual(formatted[1], "UT")
class TestDatetimeIndexUnicode(tm.TestCase):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1)]))
self.assertTrue("[2013-01-01," in text)
self.assertTrue(", 2014-01-01]" in text)
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1,12), datetime(2014,1,1)]))
self.assertTrue("[2013-01-01 00:00:00," in text)
self.assertTrue(", 2014-01-01 00:00:00]" in text)
class TestStringRepTimestamp(tm.TestCase):
    """str(Timestamp) must match str(datetime) for equivalent values."""
    def test_no_tz(self):
        # naive datetimes at date / second / microsecond resolution
        dt_date = datetime(2013, 1, 2)
        self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
        dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
        self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
        self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
        # sub-microsecond precision has no datetime equivalent: check literals
        ts_nanos_only = Timestamp(200)
        self.assertEqual(str(ts_nanos_only), "1970-01-01 00:00:00.000000200")
        ts_nanos_micros = Timestamp(1200)
        self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200")
    def test_tz(self):
        # tz-aware equivalents; skipped when pytz is unavailable
        _skip_if_no_pytz()
        import pytz
        dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
        self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
        dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
        self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
        self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
mmottahedi/neuralnilm_prototype | scripts/experiment035.py | 2 | 10172 | from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import timedelta
from numpy.random import rand
from time import time
from nilmtk import TimeFrame, DataSet, MeterGroup
"""
INPUT: quantized mains fdiff, all-hot
OUTPUT: appliance fdiff
Code taken from Lasagne and nolearn!
rsync command:
rsync -uvz --progress /home/jack/workspace/python/neuralnilm/scripts/*.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts
"""
SEQ_LENGTH = 14400
N_HIDDEN = 5
N_SEQ_PER_BATCH = 5 # Number of sequences per batch
LEARNING_RATE = 1e-1 # SGD learning rate
N_ITERATIONS = 1000 # Number of training iterations
N_INPUT_FEATURES = 1001 # 1 input for time of day + many-hot
N_OUTPUTS = 1
TZ = "Europe/London"
FILENAME = '/data/dk3810/ukdale.h5' # '/data/mine/vadeec/merged/ukdale.h5'
input_shape = (N_SEQ_PER_BATCH, SEQ_LENGTH, N_INPUT_FEATURES)
output_shape = (N_SEQ_PER_BATCH, SEQ_LENGTH, N_OUTPUTS)
############### GENERATE DATA ##############################
def quantize(data):
    """Encode each scalar in *data* onto N_BINS bins, "thermometer" style.

    Each value is histogrammed over [-1, 1] (exactly one bin fires per
    scalar) and every bin between the middle bin and the hit bin is also
    set, so magnitude is encoded by how many bins are lit.  Output is
    rescaled from {0, 1} to {-1, +1}.

    NOTE(review): assumes every value lies inside [-1, 1]; an
    out-of-range value leaves the histogram empty and the ``where``
    lookup raises IndexError -- confirm upstream clipping guarantees this.
    """
    N_BINS = N_INPUT_FEATURES - 1
    MID = N_BINS // 2
    out = np.empty(shape=(len(data), N_BINS))
    for i, d in enumerate(data):
        # histogram of a single scalar: exactly one bin gets count 1
        hist, _ = np.histogram(d, bins=N_BINS, range=(-1, 1))
        where = np.where(hist==1)[0][0]
        # light up every bin between the centre bin and the hit bin
        if where > MID:
            hist[MID:where] = 1
        elif where < MID:
            hist[where:MID] = 1
        out[i,:] = hist
    return (out * 2) - 1
def get_data_for_single_day(metergroup, target_appliance, start):
    """Load one day of meter data; return (input, target) forward diffs.

    Returns (None, None) when the target appliance never draws more than
    MINIMUM watts that day, so the caller can skip the day.

    NOTE(review): the "aggregate" input X is synthesised as
    target + boiler rather than the real mains reading (see the
    commented-out line) -- confirm this is intended.
    """
    MAXIMUM = 200
    MINIMUM = 20
    start = pd.Timestamp(start).date()
    end = start + timedelta(days=1)
    timeframe = TimeFrame(start, end, tz=TZ)
    load_kwargs = dict(sample_period=6, sections=[timeframe])
    y = metergroup[target_appliance].power_series_all_data(**load_kwargs)
    if y is None or y.max() < MINIMUM:
        return None, None
    #X = metergroup.power_series_all_data(**load_kwargs)
    X = y + metergroup['boiler'].power_series_all_data(**load_kwargs)
    index = pd.date_range(start, end, freq="6S", tz=TZ)
    def get_diff(data):
        # clip to [MINIMUM, MAXIMUM], zero-fill gaps onto a regular 6s
        # index, normalise to [0, 1], then take the forward difference
        data = data.fillna(0)
        data = data.clip(upper=MAXIMUM)
        data[data < MINIMUM] = 0
        # data -= data.min()
        data = data.reindex(index, fill_value=0)
        data /= MAXIMUM
        return data.diff().dropna()
    def index_as_minus_one_to_plus_one(data):
        # stack (scaled time-of-day, fdiff) as an (n, 2) array; despite the
        # name, the time column is scaled to [0, 1] -- TODO confirm intent
        data = get_diff(data)
        index = data.index.astype(np.int64)
        index -= np.min(index)
        index = index.astype(np.float32)
        index /= np.max(index)
        return np.vstack([index, data.values]).transpose()
    return index_as_minus_one_to_plus_one(X), get_diff(y)
def gen_unquantized_data(metergroup, validation=False):
    '''Generate one batch of unquantized energy disaggregation data.

    Random days are drawn from the first N_DAYS of the dataset (or from
    the N_SEQ_PER_BATCH days after them when validation=True) until
    N_SEQ_PER_BATCH usable days are collected; days where the target
    appliance is off, or with a wrong number of samples, are skipped.

    :returns:
        - X : np.ndarray, shape=(N_SEQ_PER_BATCH, SEQ_LENGTH, 2)
            Input sequences: column 0 = scaled time of day,
            column 1 = aggregate forward difference
        - y : np.ndarray, shape=(N_SEQ_PER_BATCH, SEQ_LENGTH, 1)
            Target sequences: appliance forward difference
    '''
    X = np.empty(shape=(N_SEQ_PER_BATCH, SEQ_LENGTH, 2))
    y = np.empty(output_shape)
    N_DAYS = 600 # there are more like 632 days in the dataset
    FIRST_DAY = pd.Timestamp("2013-04-12")
    seq_i = 0
    while seq_i < N_SEQ_PER_BATCH:
        # validation days never overlap training days
        if validation:
            days = np.random.randint(low=N_DAYS, high=N_DAYS + N_SEQ_PER_BATCH)
        else:
            days = np.random.randint(low=0, high=N_DAYS)
        start = FIRST_DAY + timedelta(days=days)
        X_one_seq, y_one_seq = get_data_for_single_day(metergroup,
                                                       'television', start)
        if y_one_seq is not None:
            try:
                X[seq_i,:,:] = X_one_seq
                y[seq_i,:,:] = y_one_seq.reshape(SEQ_LENGTH, 1)
            except ValueError as e:
                # day had the wrong number of samples; try another one
                print(e)
                print("Skipping", start)
            else:
                seq_i += 1
        else:
            # appliance was off all day
            print("Skipping", start)
    return X, y
def gen_data(X=None, *args, **kwargs):
    """Return a quantized input batch and its targets.

    When *X* is omitted, a fresh unquantized batch (and its targets *y*)
    is drawn via gen_unquantized_data; when *X* is supplied, only the
    quantization is performed and *y* is None.
    """
    if X is None:
        X, y = gen_unquantized_data(*args, **kwargs)
    else:
        y = None
    quantized = np.empty(shape=input_shape)
    for seq_i in range(N_SEQ_PER_BATCH):
        seq = X[seq_i]
        quantized[seq_i, :, 0] = seq[:, 0]            # time-of-day channel
        quantized[seq_i, :, 1:] = quantize(seq[:, 1])  # many-hot fdiff channels
    return quantized, y
class ansi:
    """ANSI terminal colour escape codes used for progress output."""
    # from dnouri/nolearn/nolearn/lasagne.py
    BLUE = '\033[94m'   # bright blue foreground
    GREEN = '\033[32m'  # green foreground
    ENDC = '\033[0m'    # reset all attributes
######################## Neural network class ########################
class Net(object):
# Much of this code is adapted from craffel/nntools/examples/lstm.py
def __init__(self):
print("Initialising network...")
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import (InputLayer, LSTMLayer, ReshapeLayer,
ConcatLayer, DenseLayer)
theano.config.compute_test_value = 'raise'
# Construct LSTM RNN: One LSTM layer and one dense output layer
l_in = InputLayer(shape=input_shape)
# setup fwd and bck LSTM layer.
l_fwd = LSTMLayer(
l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)
l_bck = LSTMLayer(
l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)
# concatenate forward and backward LSTM layers
concat_shape = (N_SEQ_PER_BATCH * SEQ_LENGTH, N_HIDDEN)
l_fwd_reshape = ReshapeLayer(l_fwd, concat_shape)
l_bck_reshape = ReshapeLayer(l_bck, concat_shape)
l_concat = ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)
l_recurrent_out = DenseLayer(l_concat, num_units=N_OUTPUTS,
nonlinearity=None)
l_out = ReshapeLayer(l_recurrent_out, output_shape)
input = T.tensor3('input')
target_output = T.tensor3('target_output')
# add test values
input.tag.test_value = rand(
*input_shape).astype(theano.config.floatX)
target_output.tag.test_value = rand(
*output_shape).astype(theano.config.floatX)
print("Compiling Theano functions...")
# Cost = mean squared error
cost = T.mean((l_out.get_output(input) - target_output)**2)
# Use NAG for training
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)
# Theano functions for training, getting output, and computing cost
self.train = theano.function(
[input, target_output],
cost, updates=updates, on_unused_input='warn',
allow_input_downcast=True)
self.y_pred = theano.function(
[input], l_out.get_output(input), on_unused_input='warn',
allow_input_downcast=True)
self.compute_cost = theano.function(
[input, target_output], cost, on_unused_input='warn',
allow_input_downcast=True)
print("Done initialising network.")
def training_loop(self):
# column 0 = training cost
# column 1 = validation cost
self.costs = np.zeros(shape=(N_ITERATIONS, 2))
self.costs[:,:] = np.nan
from nilmtk import DataSet
dataset = DataSet(FILENAME)
elec = dataset.buildings[1].elec
self.selected = elec
# APPLIANCES = ['kettle', 'television']
# selected_meters = [elec[appliance] for appliance in APPLIANCES]
# self.selected = MeterGroup(selected_meters)
# Generate a "validation" sequence whose cost we will compute
X_val, y_val = gen_data(metergroup=self.selected, validation=True)
assert X_val.shape == input_shape
assert y_val.shape == output_shape
# Adapted from dnouri/nolearn/nolearn/lasagne.py
print("""
Epoch | Train cost | Valid cost | Train / Val | Dur per epoch
--------|--------------|--------------|---------------|---------------\
""")
# Training loop
for n in range(N_ITERATIONS):
t0 = time() # for calculating training duration
X, y = gen_data(metergroup=self.selected)
train_cost = self.train(X, y).flatten()[0]
validation_cost = self.compute_cost(X_val, y_val).flatten()[0]
self.costs[n] = train_cost, validation_cost
# Print progress
duration = time() - t0
is_best_train = train_cost == np.nanmin(self.costs[:,0])
is_best_valid = validation_cost == np.nanmin(self.costs[:,1])
print(" {:>5} | {}{:>10.6f}{} | {}{:>10.6f}{} |"
" {:>11.6f} | {:>3.1f}s".format(
n,
ansi.BLUE if is_best_train else "",
train_cost,
ansi.ENDC if is_best_train else "",
ansi.GREEN if is_best_valid else "",
validation_cost,
ansi.ENDC if is_best_valid else "",
train_cost / validation_cost,
duration
))
def plot_costs(self, ax=None):
if ax is None:
ax = plt.gca()
ax.plot(self.costs[:,0], label='training')
ax.plot(self.costs[:,1], label='validation')
ax.set_xlabel('Iteration')
ax.set_ylabel('Cost')
ax.legend()
plt.show()
return ax
def plot_estimates(self, axes=None):
if axes is None:
_, axes = plt.subplots(2, sharex=True)
X, y = gen_unquantized_data(self.selected, validation=True)
y_predictions = self.y_pred(gen_data(X=X)[0])
axes[0].set_title('Appliance forward difference')
axes[0].plot(y_predictions[0,:,0], label='Estimates')
axes[0].plot(y[0,:,0], label='Appliance ground truth')
axes[0].legend()
axes[1].set_title('Aggregate')
axes[1].plot(X[0,:,1], label='Fdiff')
axes[1].plot(np.cumsum(X[0,:,1]), label='Cumsum')
axes[1].legend()
plt.show()
if __name__ == "__main__":
net = Net()
net.training_loop()
net.plot_costs()
net.plot_estimates()
| mit |
radjkarl/imgProcessor | imgProcessor/interpolate/interpolateCircular2dStructuredIDW.py | 1 | 6467 | from __future__ import division
import numpy as np
from numba import jit
from math import atan2
@jit(nopython=True)
def interpolateCircular2dStructuredIDW(grid, mask, kernel=15, power=2,
                                       fr=1, fphi=1, cx=0, cy=0):
    '''
    Inverse-distance-weighted interpolation of masked cells of a 2d grid.
    Same as interpolate2dStructuredIDW but distances between neighbours
    are measured in polar coordinates.

    grid -> 2d array; masked cells are overwritten in place
    mask -> 2d bool array; True marks cells to interpolate
    kernel -> half-width of the square neighbourhood searched per cell
    power -> IDW power parameter
    fr, fphi --> weight factors for radius and radian differences
    cx,cy -> polar center of the array e.g. middle->(sx//2+1,sy//2+1)
    '''
    gx = grid.shape[0]
    # BUGFIX: was grid.shape[0]; gave wrong bounds for non-square grids
    gy = grid.shape[1]
    #FOR EVERY PIXEL
    for i in range(gx):
        for j in range(gy):
            if mask[i,j]:
                # clip the kernel window to the grid bounds
                xmn = i-kernel
                if xmn < 0:
                    xmn = 0
                xmx = i+kernel
                if xmx > gx:
                    xmx = gx
                ymn = j-kernel
                if ymn < 0:
                    ymn = 0
                ymx = j+kernel
                # BUGFIX: was compared against gx instead of gy
                if ymx > gy:
                    ymx = gy
                sumWi = 0.0
                value = 0.0
                #radius and radian to polar center:
                R = ((i-cx)**2+(j-cy)**2)**0.5
                PHI = atan2(j-cy, i-cx)
                #FOR EVERY NEIGHBOUR IN KERNEL
                for xi in range(xmn,xmx):
                    for yi in range(ymn,ymx):
                        # only unmasked neighbours (and not the pixel itself)
                        # contribute to the weighted average
                        if (xi != i or yi != j) and not mask[xi,yi]:
                            nR = ((xi-cx)**2+(yi-cy)**2)**0.5
                            dr = R - nR
                            #average radius between both p:
                            midR = 0.5*(R+nR)
                            #radian of neighbour p:
                            nphi = atan2(yi-cy, xi-cx)
                            #relative angle between both points (wraps at 2*pi):
                            dphi = min((2*np.pi) - abs(PHI - nphi),
                                       abs(PHI - nphi))
                            # arc length at the mean radius
                            dphi*=midR
                            # NOTE(review): squaring the squared sum doubles the
                            # effective IDW exponent (weight ~ 1/d**(2*power)).
                            # Kept as-is to preserve existing results -- confirm
                            # whether **0.5 was intended instead of **2 here.
                            dist = ((fr*dr)**2+(fphi*dphi)**2)**2
                            wi = 1 / dist**(0.5*power)
                            sumWi += wi
                            value += wi * grid[xi,yi]
                if sumWi:
                    grid[i,j] = value / sumWi
    return grid
if __name__ == '__main__':
import sys
from matplotlib import pyplot as plt
#this is part or a point spread function
arr = np.array([[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.00091412, 0.00092669, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.00071046, 0.00087626, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.00174763, 0.00316936, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.00802817, 0.01606653, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0.00052719, 0.00165561, 0.00208777, 0.00212379, 0. ,
0. , 0. , 0.01836601, 0.04002059, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0.00052719, 0.00257932, 0.00291309, 0.00914339, 0.02844799,
0.04823197, 0.05040033, 0.06361089, 0.04638128, 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0.00225948, 0.00222627, 0.00755744, 0.0133372 ,
0.02761284, 0.06116419, 0.07565894, 0.05202775, 0.01511698,
0.00697312, 0.00270475, 0.00077251, 0.00067585, 0.00055524],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.0532791 , 0.06633063, 0.07244685, 0.03513939,
0.01519723, 0.00217622, 0.00107757, 0.00076782, 0.0004534 ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.04717624, 0.03095101, 0. , 0. ,
0. , 0.00164764, 0.00137625, 0.00075694, 0.00076486],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.02333552, 0.01279662, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.00623037, 0.00400915, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.00128086, 0.00131918, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.00080955, 0.00085656, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0.00094004, 0.00078282, 0. , 0. ,
0. , 0. , 0. , 0. , 0. ]])
mask = arr==0
s = arr.shape
cx = s[0]//2+1
cy = s[1]//2+1
fit = interpolateCircular2dStructuredIDW(
arr.copy(), mask, fr=1, fphi=0.2, cx=cx, cy=cy)
if 'no_window' not in sys.argv:
plt.figure('original')
plt.imshow(arr, interpolation='none')
plt.colorbar()
plt.figure('fit')
plt.imshow(fit, interpolation='none')
plt.colorbar()
plt.show()
| gpl-3.0 |
herilalaina/scikit-learn | examples/cluster/plot_cluster_iris.py | 56 | 2815 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)

iris = datasets.load_iris()
X = iris.data
y = iris.target

# three fits: a good 8-cluster fit, a good 3-cluster fit and a deliberately
# badly initialised 3-cluster fit (n_init=1 with random centroid seeds)
estimators = [('k_means_iris_8', KMeans(n_clusters=8)),
              ('k_means_iris_3', KMeans(n_clusters=3)),
              ('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,
                                               init='random'))]

fignum = 1
titles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']
for name, est in estimators:
    fig = plt.figure(fignum, figsize=(4, 3))
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    est.fit(X)
    labels = est.labels_

    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float
    ax.scatter(X[:, 3], X[:, 0], X[:, 2],
               c=labels.astype(float), edgecolor='k')

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    ax.set_title(titles[fignum - 1])
    ax.dist = 12
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

# label each species cluster at its centroid
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean(),
              X[y == label, 2].mean() + 2, name,
              horizontalalignment='center',
              bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title('Ground Truth')
ax.dist = 12

fig.show()
| bsd-3-clause |
dmnfarrell/mirnaseq | smallrnaseq/plotting.py | 2 | 7525 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Plotting methods for smallrnaseq package.
Created June 2014
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import sys, os, string, types
import itertools
import matplotlib
matplotlib.use('agg')
import pylab as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style("ticks", {'axes.facecolor': '#F7F7F7',
'axes.grid': False,'legend.frameon':True})
sns.set_context("notebook", font_scale=1.3)
from . import base, utils, analysis
def venn_diagram(names, labels, ax=None, **kwargs):
    """Plot a 2- or 3-set venn diagram.

    Args:
        names: sequence of 2 or 3 iterables; each is converted to a set
        labels: set labels passed through to matplotlib_venn
        ax: matplotlib axis to draw into; a new 4x4 inch figure is
            created when omitted
        **kwargs: extra keyword args for venn2/venn3
    """
    from matplotlib_venn import venn2, venn3
    if ax is None:  # idiom fix: was `ax==None`
        f = plt.figure(figsize=(4, 4))
        ax = f.add_subplot(111)
    if len(names) == 2:
        n1, n2 = names
        venn2([set(n1), set(n2)], set_labels=labels, **kwargs)
    elif len(names) == 3:
        n1, n2, n3 = names
        venn3([set(n1), set(n2), set(n3)], set_labels=labels, **kwargs)
    ax.axis('off')
    #f.patch.set_visible(False)
    ax.set_axis_off()
    return
def heatmap(df, fname=None, cmap='seismic', log=False):
    """Plot a heat map of a dataframe.

    Args:
        df: DataFrame of values to plot
        fname: if given, the figure is also saved to '<fname>.png'
        cmap: matplotlib colormap name
        log: use logarithmic colour normalisation
    Returns:
        the matplotlib axis
    """
    from matplotlib.colors import LogNorm
    f = plt.figure(figsize=(8, 8))
    ax = f.add_subplot(111)
    norm = None
    # zeros break LogNorm; replace with a small positive value
    df = df.replace(0, .1)
    if log:  # idiom fix: was `log==True`
        norm = LogNorm(vmin=df.min().min(), vmax=df.max().max())
    hm = ax.pcolor(df, cmap=cmap, norm=norm)
    plt.colorbar(hm, ax=ax, shrink=0.6, norm=norm)
    # tick each row/column at its cell centre
    plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
    plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns, rotation=90)
    #ax.axvline(4, color='gray'); ax.axvline(8, color='gray')
    plt.tight_layout()
    if fname is not None:  # idiom fix: was `fname != None`
        f.savefig(fname + '.png')
    return ax
def plot_read_lengths(filename, df=None):
    """View read length distributions.

    Args:
        filename: fastq file to sample reads from.
        df: optional pre-loaded read dataframe; if given, the file is not
            re-read. (The original ignored this parameter and always
            overwrote it from the file.)
    Returns:
        the matplotlib figure.
    """
    if df is None:
        df = utils.fastq_to_dataframe(filename, size=5e5)
    x = analysis.read_length_dist(df)
    fig, ax = plt.subplots(1, 1, figsize=(10, 4))
    # x is a histogram tuple (counts, bin_edges); drop the last edge
    ax.bar(x[1][:-1], x[0], align='center')
    return fig
def plot_sample_variation(df):
    """Plot raw vs normalised counts of one feature across samples.

    NOTE(review): the original body referenced undefined names ``mirdeep2``
    and ``m`` and would raise NameError on any call. They are replaced here
    with ``base.get_column_names`` (the helper used by the sibling plotting
    functions) and the ``df`` argument — confirm against the caller. The
    removed ``.ix`` indexer is replaced with positional ``.iloc``.
    """
    fig, axs = plt.subplots(2, 1, figsize=(6, 6))
    axs = axs.flat
    cols, ncols = base.get_column_names(df)
    # show the third row's values for the two column groups
    x = df.iloc[2][cols]
    x.plot(kind='bar', ax=axs[0])
    x2 = df.iloc[2][ncols]
    x2.plot(kind='bar', ax=axs[1])
    sns.despine(trim=True, offset=10)
    plt.tight_layout()
    return fig
def plot_by_label(X, palette='Set1'):
    """Color scatter plot by dataframe index label.

    Args:
        X: dataframe whose first two columns are plotted; points are
            grouped and colored by index label.
        palette: seaborn/matplotlib palette name.

    The deprecated-and-removed ``DataFrame.ix`` indexer is replaced with
    label selection via ``.loc`` plus positional column access.
    """
    import seaborn as sns
    cats = X.index.unique()
    colors = sns.mpl_palette(palette, len(cats))
    f, ax = plt.subplots(figsize=(6, 6))
    for c, i in zip(colors, cats):
        # select all rows with this label; [[i]] keeps a DataFrame even
        # when the label matches a single row
        sub = X.loc[[i]]
        ax.scatter(sub.iloc[:, 0], sub.iloc[:, 1], color=c, s=100, label=i,
                   lw=1, edgecolor='black')
    ax.legend(fontsize=10)
    sns.despine()
    return
def plot_fractions(df, label=None):
    """Process results of multiple mappings to get fractions of each
    annotation mapped.

    Args:
        df: results table with a 'label' column identifying the sample.
        label: plot this sample only as a pie chart; otherwise draw a
            stacked bar chart of all samples.
    Returns:
        the matplotlib figure.
    """
    fig, ax = plt.subplots(figsize=(8, 8))
    data = df.set_index('label')._get_numeric_data()
    # a single-sample table degenerates to the pie-chart case
    if len(data) == 1:
        label = data.index[0]
    if label is not None:
        ax = data.T.plot(y=label, kind='pie', colormap='Spectral',
                         autopct='%.1f%%', startangle=0, labels=None,
                         legend=True, pctdistance=1.1, fontsize=10, ax=ax)
    else:
        ax = data.plot(kind='barh', stacked=True, linewidth=0,
                       cmap='Spectral', ax=ax)
        ax.set_position([0.2, 0.1, 0.6, 0.8])
        ax.legend(loc="best", bbox_to_anchor=(1.0, .9))
    plt.title('fractions mapped')
    return fig
def plot_sample_counts(counts):
    """Bar chart of total (unnormalised) read counts per sample."""
    fig, ax = plt.subplots(figsize=(10, 6))
    sample_cols, _ = base.get_column_names(counts)
    totals = counts[sample_cols].sum()
    totals.plot(kind='bar', ax=ax)
    plt.title('total counts per sample (unnormalised)')
    plt.tight_layout()
    return fig
def plot_read_count_dists(counts, h=8, n=50):
    """Boxplots of read count distributions.

    Args:
        counts: count table with 'name', 'mean_norm' and sample columns.
        h: figure height; width scales with the number of features.
        n: number of top features (by mean_norm) to show.
    Returns:
        the matplotlib figure.
    """
    scols, ncols = base.get_column_names(counts)
    top = counts.sort_values(by='mean_norm', ascending=False)[:n]
    top = top.set_index('name')[ncols]
    transposed = top.T
    # widen the figure as more features are displayed
    w = int(h * (len(top) / 60.0)) + 4
    fig, ax = plt.subplots(figsize=(w, h))
    if len(scols) > 1:
        sns.stripplot(data=transposed, linewidth=1.0, palette='coolwarm_r')
        ax.xaxis.grid(True)
    else:
        top.plot(kind='bar', ax=ax)
    sns.despine(offset=10, trim=True)
    ax.set_yscale('log')
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)
    plt.ylabel('read count')
    fig.subplots_adjust(bottom=0.2, top=0.9)
    return fig
def expression_clustermap(counts, freq=0.8):
    """Clustered heat map of log expression values.

    Args:
        counts: count table with a 'name' column and normalised sample
            columns (as returned by base.get_column_names).
        freq: minimum fraction of samples a feature must be observed in to
            be kept. (The original accepted this parameter but ignored it,
            hard-coding a 0.2 missing-fraction cutoff — equivalent to the
            default freq=0.8, so behaviour is unchanged for callers using
            the default.)
    Returns:
        the seaborn ClusterGrid.
    """
    scols, ncols = base.get_column_names(counts)
    X = counts.set_index('name')[ncols]
    X = np.log(X)
    # keep features whose missing fraction is below 1-freq
    X = X[X.isnull().sum(1) / len(X.columns) < (1 - freq)]
    X = X.fillna(0)
    cg = sns.clustermap(X, cmap='YlGnBu', figsize=(12, 12), lw=0,
                        linecolor='gray')
    mt = plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0,
                  fontsize=9)
    mt = plt.setp(cg.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
    return cg
def plot_PCA(X, cmap='Spectral', colors=None, dims=(0,1), ax=None, annotate=None, legend=True, **kwargs):
    '''
    Plot PCA from matrix and label.

    :X: dataframe with index as categories
    :cmap: palette used when colors is None
    :colors: explicit per-category colors
    :dims: pair of principal-component indices to plot
    :ax: axes to draw into; a new figure is made if None
    :annotate: optional list of per-point text annotations
    :legend: whether to draw the category legend
    :return: dataframe of PCA-transformed coordinates
    '''
    from sklearn import preprocessing
    # sklearn.decomposition.pca was a private module path removed in recent
    # scikit-learn; import PCA from the public package instead.
    from sklearn.decomposition import PCA
    X = X._get_numeric_data()
    S = pd.DataFrame(preprocessing.scale(X), columns=X.columns)
    pca = PCA(n_components=4)
    pca.fit(S)
    out = 'explained variance %s' % pca.explained_variance_ratio_
    print(out)
    # component loadings, kept for inspection/debugging
    w = pd.DataFrame(pca.components_, columns=S.columns)
    pX = pca.fit_transform(S)
    pX = pd.DataFrame(pX, index=X.index)
    ### graph
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 8))
    cats = pX.index.unique()
    if colors is None:
        colors = sns.mpl_palette(cmap, len(cats))
    y1, y2 = dims
    for c, i in zip(colors, cats):
        ax.scatter(pX.loc[i, y1], pX.loc[i, y2], color=c, label=i,
                   edgecolor='black', **kwargs)
    if annotate is not None:
        # fixed column-name typo: was pX['lab#el']
        pX['label'] = annotate
        i = 0
        for idx, r in pX.iterrows():
            x = r[y1]; y = r[y2]
            l = annotate[i]
            ax.annotate(l, (x, y), xycoords='data', xytext=(2, 5),
                        textcoords='offset points', fontsize=12)
            i += 1
    ax.set_xlabel("X[%s]" % y1)
    ax.set_ylabel("X[%s]" % y2)
    if legend == True:
        ax.set_position([0.1, 0.1, 0.5, 0.8])
        ax.legend(loc="best", bbox_to_anchor=(1.0, .9))
    ax.set_title("PCA")
    return pX
| gpl-3.0 |
alshedivat/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py | 30 | 40476 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
  """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || feature[i, :] - feature[j, :] ||_2

  Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
  """
  # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed in one shot.
  pairwise_distances_squared = math_ops.add(
      math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
      math_ops.reduce_sum(
          math_ops.square(array_ops.transpose(feature)),
          axis=[0],
          keepdims=True)) - 2.0 * math_ops.matmul(feature,
                                                  array_ops.transpose(feature))
  # Deal with numerical inaccuracies. Set small negatives to zero.
  pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
  # Get the mask where the zero distances are at.
  error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
  # Optionally take the sqrt.
  if squared:
    pairwise_distances = pairwise_distances_squared
  else:
    # Add a tiny epsilon at the zero entries so sqrt has a finite gradient.
    pairwise_distances = math_ops.sqrt(
        pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
  # Undo conditionally adding 1e-16.
  pairwise_distances = math_ops.multiply(
      pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
  num_data = array_ops.shape(feature)[0]
  # Explicitly set diagonals to zero.
  mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
      array_ops.ones([num_data]))
  pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
  return pairwise_distances
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages the embedding to be close to each other for
  the samples of the same label and the embedding to be far apart at least
  by the margin constant for the samples of different labels.

  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      binary labels indicating positive vs negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Euclidean distance of each anchor/positive pair.
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.square(embeddings_anchor - embeddings_positive), 1))
  labels_float = math_ops.to_float(labels)
  # Positive pairs (label 1) pull together via squared distance; negative
  # pairs (label 0) push apart via a squared hinge on the margin.
  positive_term = labels_float * math_ops.square(distances)
  negative_term = (1. - labels_float) * math_ops.square(
      math_ops.maximum(margin - distances, 0.))
  return math_ops.reduce_mean(positive_term + negative_term,
                              name='contrastive_loss')
def masked_maximum(data, mask, dim=1):
  """Computes the axis wise maximum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the maximum.

  Returns:
    masked_maximums: N-D `Tensor`.
      The maximized dimension is of size 1 after the operation.
  """
  # Shift by the row minimum so masked-out entries (zeroed by the multiply)
  # can never exceed the true masked maximum; then shift back.
  shift = math_ops.reduce_min(data, dim, keepdims=True)
  shifted_max = math_ops.reduce_max(
      math_ops.multiply(data - shift, mask), dim, keepdims=True)
  return shifted_max + shift
def masked_minimum(data, mask, dim=1):
  """Computes the axis wise minimum over chosen elements.

  Args:
    data: 2-D float `Tensor` of size [n, m].
    mask: 2-D Boolean `Tensor` of size [n, m].
    dim: The dimension over which to compute the minimum.

  Returns:
    masked_minimums: N-D `Tensor`.
      The minimized dimension is of size 1 after the operation.
  """
  # Shift by the row maximum so masked-out entries (zeroed by the multiply)
  # can never undercut the true masked minimum; then shift back.
  shift = math_ops.reduce_max(data, dim, keepdims=True)
  shifted_min = math_ops.reduce_min(
      math_ops.multiply(data - shift, mask), dim, keepdims=True)
  return shifted_min + shift
def triplet_semihard_loss(labels, embeddings, margin=1.0):
  """Computes the triplet loss with semi-hard negative mining.

  The loss encourages the positive distances (between a pair of embeddings with
  the same labels) to be smaller than the minimum negative distance among
  which are at least greater than the positive distance plus the margin constant
  (called semi-hard negative) in the mini-batch. If no such negative exists,
  uses the largest negative distance instead.

  See: https://arxiv.org/abs/1503.03832.

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      multiclass integer labels.
    embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
      be l2 normalized.
    margin: Float, margin term in the loss definition.

  Returns:
    triplet_loss: tf.float32 scalar.
  """
  # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
  lshape = array_ops.shape(labels)
  assert lshape.shape == 1
  labels = array_ops.reshape(labels, [lshape[0], 1])
  # Build pairwise squared distance matrix.
  pdist_matrix = pairwise_distance(embeddings, squared=True)
  # Build pairwise binary adjacency matrix (True where labels match).
  adjacency = math_ops.equal(labels, array_ops.transpose(labels))
  # Invert so we can select negatives only.
  adjacency_not = math_ops.logical_not(adjacency)
  batch_size = array_ops.size(labels)
  # Compute the mask: for each (anchor, positive) pair, which candidates are
  # negatives with D_an > D_ap (tiled to [batch_size*batch_size, batch_size]).
  pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
  mask = math_ops.logical_and(
      array_ops.tile(adjacency_not, [batch_size, 1]),
      math_ops.greater(
          pdist_matrix_tile, array_ops.reshape(
              array_ops.transpose(pdist_matrix), [-1, 1])))
  # mask_final[i, j] is True when at least one semi-hard negative exists for
  # the (i, j) anchor/positive pair.
  mask_final = array_ops.reshape(
      math_ops.greater(
          math_ops.reduce_sum(
              math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
          0.0), [batch_size, batch_size])
  mask_final = array_ops.transpose(mask_final)
  adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
  mask = math_ops.cast(mask, dtype=dtypes.float32)
  # negatives_outside: smallest D_an where D_an > D_ap.
  negatives_outside = array_ops.reshape(
      masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
  negatives_outside = array_ops.transpose(negatives_outside)
  # negatives_inside: largest D_an (fallback when no semi-hard negative).
  negatives_inside = array_ops.tile(
      masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
  semi_hard_negatives = array_ops.where(
      mask_final, negatives_outside, negatives_inside)
  # Hinge input: margin + D_ap - D_an.
  loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
  # All positive pairs except self-pairs on the diagonal.
  mask_positives = math_ops.cast(
      adjacency, dtype=dtypes.float32) - array_ops.diag(
          array_ops.ones([batch_size]))
  # In lifted-struct, the authors multiply 0.5 for upper triangular
  # in semihard, they take all positive pairs except the diagonal.
  num_positives = math_ops.reduce_sum(mask_positives)
  triplet_loss = math_ops.truediv(
      math_ops.reduce_sum(
          math_ops.maximum(
              math_ops.multiply(loss_mat, mask_positives), 0.0)),
      num_positives,
      name='triplet_semihard_loss')
  return triplet_loss
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
                reg_lambda=0.002, print_losses=False):
  """Computes the npairs loss.

  Npairs loss expects paired data where a pair is composed of samples from the
  same labels and each pairs in the minibatch have different labels. The loss
  has two components. The first component is the L2 regularizer on the
  embedding vectors. The second component is the sum of cross entropy loss
  which takes each row of the pair-wise similarity matrix as logits and
  the remapped one-hot labels as labels.

  See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
    embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
      embedding vectors for the anchor images. Embeddings should not be
      l2 normalized.
    embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
      embedding vectors for the positive images. Embeddings should not be
      l2 normalized.
    reg_lambda: Float. L2 regularization term on the embedding vectors.
    print_losses: Boolean. Option to print the xent and l2loss.

  Returns:
    npairs_loss: tf.float32 scalar.
  """
  # pylint: enable=line-too-long
  # Add the regularizer on the embedding.
  reg_anchor = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
  reg_positive = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
  l2loss = math_ops.multiply(
      0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
  # Get per pair similarities (dot products between anchors and positives).
  similarity_matrix = math_ops.matmul(
      embeddings_anchor, embeddings_positive, transpose_a=False,
      transpose_b=True)
  # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
  lshape = array_ops.shape(labels)
  assert lshape.shape == 1
  labels = array_ops.reshape(labels, [lshape[0], 1])
  # One-hot-like target rows normalized so each row sums to one.
  labels_remapped = math_ops.to_float(
      math_ops.equal(labels, array_ops.transpose(labels)))
  labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
  # Add the softmax loss.
  xent_loss = nn.softmax_cross_entropy_with_logits(
      logits=similarity_matrix, labels=labels_remapped)
  xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
  if print_losses:
    xent_loss = logging_ops.Print(
        xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
  return l2loss + xent_loss
def _build_multilabel_adjacency(sparse_labels):
  """Builds multilabel adjacency matrix.

  As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is `sparse_minimum` op which is
  equivalent to an AND op between two sparse boolean tensors.
  This computes the dot product between two sparse boolean inputs.

  Args:
    sparse_labels: List of 1-D boolean sparse tensors.

  Returns:
    adjacency_matrix: 2-D dense `Tensor`.
  """
  num_pairs = len(sparse_labels)
  adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
  for row in range(num_pairs):
    for col in range(num_pairs):
      # AND of the two boolean label sets, summed = size of the overlap.
      overlap = math_ops.to_float(
          sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
              sparse_labels[row], sparse_labels[col])))
      # Promote the scalar to shape [1, 1], then pad it into place so the
      # accumulation scatters it at position (row, col).
      overlap = array_ops.expand_dims(array_ops.expand_dims(overlap, 0), 1)
      placed = array_ops.pad(overlap,
                             [[row, num_pairs - row - 1],
                              [col, num_pairs - col - 1]], 'CONSTANT')
      adjacency_matrix += placed
  return adjacency_matrix
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
                           embeddings_positive, reg_lambda=0.002,
                           print_losses=False):
  r"""Computes the npairs loss with multilabel data.

  Npairs loss expects paired data where a pair is composed of samples from the
  same labels and each pairs in the minibatch have different labels. The loss
  has two components. The first component is the L2 regularizer on the
  embedding vectors. The second component is the sum of cross entropy loss
  which takes each row of the pair-wise similarity matrix as logits and
  the remapped one-hot labels as labels. Here, the similarity is defined by the
  dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)

  To deal with multilabel inputs, we use the count of label intersection
  i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
  Then we normalize each rows of the count based label matrix so that each row
  sums to one.

  Args:
    sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
      [batch_size/2, num_classes] labels for the anchor-pos pairs.
    embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
      the embedding vectors for the anchor images. Embeddings should not be
      l2 normalized.
    embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
      the embedding vectors for the positive images. Embeddings should not be
      l2 normalized.
    reg_lambda: Float. L2 regularization term on the embedding vectors.
    print_losses: Boolean. Option to print the xent and l2loss.

  Returns:
    npairs_loss: tf.float32 scalar.

  Raises:
    TypeError: When the specified sparse_labels is not a `SparseTensor`.
  """
  # Idiomatic all()-based type check (was: `if False in [isinstance(...)]`).
  if not all(
      isinstance(l, sparse_tensor.SparseTensor) for l in sparse_labels):
    raise TypeError(
        'sparse_labels must be a list of SparseTensors, but got %s' % str(
            sparse_labels))
  with ops.name_scope('NpairsLossMultiLabel'):
    # Add the regularizer on the embedding.
    reg_anchor = math_ops.reduce_mean(
        math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
    reg_positive = math_ops.reduce_mean(
        math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
    l2loss = math_ops.multiply(0.25 * reg_lambda,
                               reg_anchor + reg_positive, name='l2loss')
    # Get per pair similarities.
    similarity_matrix = math_ops.matmul(
        embeddings_anchor, embeddings_positive, transpose_a=False,
        transpose_b=True)
    # TODO(coreylynch): need to check the sparse values
    # TODO(coreylynch): are composed only of 0's and 1's.
    multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
    labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
    # Row-normalize the intersection counts into a target distribution.
    labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
    # Add the softmax loss.
    xent_loss = nn.softmax_cross_entropy_with_logits(
        logits=similarity_matrix, labels=labels_remapped)
    xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
    if print_losses:
      xent_loss = logging_ops.Print(
          xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
    return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
  """Computes the lifted structured loss.

  The loss encourages the positive distances (between a pair of embeddings
  with the same labels) to be smaller than any negative distances (between a
  pair of embeddings with different labels) in the mini-batch in a way
  that is differentiable with respect to the embedding vectors.

  See: https://arxiv.org/abs/1511.06452.

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      multiclass integer labels.
    embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
      be l2 normalized.
    margin: Float, margin term in the loss definition.

  Returns:
    lifted_loss: tf.float32 scalar.
  """
  # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
  lshape = array_ops.shape(labels)
  assert lshape.shape == 1
  labels = array_ops.reshape(labels, [lshape[0], 1])
  # Build pairwise squared distance matrix.
  pairwise_distances = pairwise_distance(embeddings)
  # Build pairwise binary adjacency matrix.
  adjacency = math_ops.equal(labels, array_ops.transpose(labels))
  # Invert so we can select negatives only.
  adjacency_not = math_ops.logical_not(adjacency)
  batch_size = array_ops.size(labels)
  diff = margin - pairwise_distances
  mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
  # Safe maximum: Temporarily shift negative distances
  # above zero before taking max.
  # this is to take the max only among negatives.
  row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
  row_negative_maximums = math_ops.reduce_max(
      math_ops.multiply(diff - row_minimums, mask), 1,
      keepdims=True) + row_minimums
  # Compute the loss.
  # Keep track of matrix of maximums where M_ij = max(m_i, m_j)
  # where m_i is the max of alpha - negative D_i's.
  # This matches the Caffe loss layer implementation at:
  # https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp  # pylint: disable=line-too-long
  max_elements = math_ops.maximum(
      row_negative_maximums, array_ops.transpose(row_negative_maximums))
  diff_tiled = array_ops.tile(diff, [batch_size, 1])
  mask_tiled = array_ops.tile(mask, [batch_size, 1])
  max_elements_vect = array_ops.reshape(
      array_ops.transpose(max_elements), [-1, 1])
  # Log-sum-exp over negatives, computed stably by subtracting max_elements.
  loss_exp_left = array_ops.reshape(
      math_ops.reduce_sum(
          math_ops.multiply(
              math_ops.exp(diff_tiled - max_elements_vect), mask_tiled),
          1,
          keepdims=True), [batch_size, batch_size])
  loss_mat = max_elements + math_ops.log(
      loss_exp_left + array_ops.transpose(loss_exp_left))
  # Add the positive distance.
  loss_mat += pairwise_distances
  # All positive pairs except self-pairs on the diagonal.
  mask_positives = math_ops.cast(
      adjacency, dtype=dtypes.float32) - array_ops.diag(
          array_ops.ones([batch_size]))
  # *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
  num_positives = math_ops.reduce_sum(mask_positives) / 2.0
  lifted_loss = math_ops.truediv(
      0.25 * math_ops.reduce_sum(
          math_ops.square(
              math_ops.maximum(
                  math_ops.multiply(loss_mat, mask_positives), 0.0))),
      num_positives,
      name='liftedstruct_loss')
  return lifted_loss
def update_1d_tensor(y, index, value):
  """Updates 1d tensor y so that y[index] = value.

  Args:
    y: 1-D Tensor.
    index: index of y to modify.
    value: new value to write at y[index].

  Returns:
    y_mod: 1-D Tensor. Tensor y after the update.
  """
  value = array_ops.squeeze(value)
  # Stitch the result together: elements before index, the new value,
  # then the elements after index.
  prefix = array_ops.slice(y, [0], [index])
  suffix = array_ops.slice(y, [index + 1], [-1])
  return array_ops.concat([prefix, [value], suffix], 0)
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the neareset centroids.

  Tensorflow has numerical instability and doesn't always choose
  the data point with theoretically zero distance as it's nearest neighbor.
  Thus, for each centroid in centroid_ids, explicitly assign
  the centroid itself as the nearest centroid.
  This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  # Nearest centroid (by row index into the gathered distance rows).
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]
  # Deal with numerical instability
  # mask[i] is True when data point i is itself one of the centroids.
  mask = math_ops.reduce_any(array_ops.one_hot(
      centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
                             axis=0)
  # For each centroid, record its own cluster index so it can be forced to
  # be assigned to itself below.
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)
  # Centroid points take the forced assignment; everything else keeps the
  # argmin prediction.
  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
  """Compute the average travel distance to the assigned centroid.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of indices.

  Returns:
    facility_energy: dtypes.float32 scalar.
  """
  # Distance from every point to its nearest chosen centroid, negated so
  # that larger (less negative) is better.
  distances_to_centroids = array_ops.gather(pairwise_distances, centroid_ids)
  nearest_distances = math_ops.reduce_min(distances_to_centroids, axis=0)
  return -1.0 * math_ops.reduce_sum(nearest_distances)
def compute_clustering_score(labels, predictions, margin_type):
  """Computes the clustering score via sklearn.metrics functions.

  There are various ways to compute the clustering score. Intuitively,
  we want to measure the agreement of two clustering assignments (labels vs
  predictions) ignoring the permutations and output a score from zero to one.
  (where the values close to one indicate significant agreement).
  This code supports following scoring functions:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted random index
    vmeasure: v-measure
    const: indicator checking whether the two clusterings are the same.
  See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
    for the detailed descriptions.

  Args:
    labels: 1-D Tensor. ground truth cluster assignment.
    predictions: 1-D Tensor. predicted cluster assignment.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    clustering_score: dtypes.float32 scalar.
      The possible valid values are from zero to one.
      Zero means the worst clustering and one means the perfect clustering.

  Raises:
    ValueError: margin_type is not recognized.
  """
  scoring_functions = {
      'nmi': _compute_nmi_score,
      'ami': _compute_ami_score,
      'ari': _compute_ari_score,
      'vmeasure': _compute_vmeasure_score,
      'const': _compute_zeroone_score,
  }
  try:
    score_fn = scoring_functions[margin_type]
  except KeyError:
    raise ValueError('Unrecognized margin_type: %s' % margin_type)
  return array_ops.squeeze(score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
  # Normalized mutual information via sklearn, wrapped in a py_func.
  nmi = script_ops.py_func(
      metrics.normalized_mutual_info_score, [labels, predictions],
      [dtypes.float64],
      name='nmi')
  return math_ops.to_float(nmi)
def _compute_ami_score(labels, predictions):
  # Adjusted mutual information via sklearn, wrapped in a py_func.
  ami_score = math_ops.to_float(
      script_ops.py_func(
          metrics.adjusted_mutual_info_score, [labels, predictions],
          [dtypes.float64],
          name='ami'))
  # AMI can be negative; clamp to the [0, 1] convention used here.
  return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
  # Adjusted Rand index via sklearn, wrapped in a py_func.
  ari_score = math_ops.to_float(
      script_ops.py_func(
          metrics.adjusted_rand_score, [labels, predictions],
          [dtypes.float64],
          name='ari'))
  # ari score can go below 0
  # http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
  return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
  # V-measure via sklearn, wrapped in a py_func; clamped at zero.
  vmeasure_score = math_ops.to_float(
      script_ops.py_func(
          metrics.v_measure_score, [labels, predictions],
          [dtypes.float64],
          name='vmeasure'))
  return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
  # 1.0 iff every prediction equals its label, else 0.0.
  num_matches = math_ops.reduce_sum(
      math_ops.to_int32(math_ops.equal(labels, predictions)))
  all_match = math_ops.equal(num_matches, array_ops.shape(labels)[0])
  return math_ops.to_float(all_match)
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
                                      candidate_ids, margin_multiplier,
                                      margin_type):
  """Find the next centroid that maximizes the loss augmented inference.

  This function is a subroutine called from compute_augmented_facility_locations

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.
    chosen_ids: 1-D Tensor of current centroid indices.
    candidate_ids: 1-D Tensor of candidate indices.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    integer index.
  """
  num_candidates = array_ops.shape(candidate_ids)[0]
  pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
  pairwise_distances_candidate = array_ops.gather(
      pairwise_distances, candidate_ids)
  pairwise_distances_chosen_tile = array_ops.tile(
      pairwise_distances_chosen, [1, num_candidates])
  # Facility score for each candidate: negative total distance of every
  # point to its nearest centroid among chosen_ids plus that candidate.
  candidate_scores = -1.0 * math_ops.reduce_sum(
      array_ops.reshape(
          math_ops.reduce_min(
              array_ops.concat([
                  pairwise_distances_chosen_tile,
                  array_ops.reshape(pairwise_distances_candidate, [1, -1])
              ], 0),
              axis=0,
              keepdims=True), [num_candidates, -1]),
      axis=1)
  nmi_scores = array_ops.zeros([num_candidates])
  iteration = array_ops.constant(0)

  def func_cond(iteration, nmi_scores):
    del nmi_scores  # Unused in func_cond()
    return iteration < num_candidates

  def func_body(iteration, nmi_scores):
    # Score the clustering induced by adding this candidate as a centroid.
    predictions = get_cluster_assignment(
        pairwise_distances,
        array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
    nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
    pad_before = array_ops.zeros([iteration])
    pad_after = array_ops.zeros([num_candidates - 1 - iteration])
    # return 1 - NMI score as the structured loss.
    # because NMI is higher the better [0,1].
    return iteration + 1, nmi_scores + array_ops.concat(
        [pad_before, [1.0 - nmi_score_i], pad_after], 0)

  _, nmi_scores = control_flow_ops.while_loop(
      func_cond, func_body, [iteration, nmi_scores])
  # Loss-augmented score: facility energy plus weighted structured loss.
  candidate_scores = math_ops.add(
      candidate_scores, margin_multiplier * nmi_scores)
  argmax_index = math_ops.to_int32(
      math_ops.argmax(candidate_scores, axis=0))
  return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
                                         margin_multiplier, margin_type):
  """Computes the centroid locations.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.
    all_ids: 1-D Tensor of all data indices.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    chosen_ids: 1-D Tensor of chosen centroid indices.
  """

  def func_cond_augmented(iteration, chosen_ids):
    del chosen_ids  # Unused argument in func_cond_augmented.
    return iteration < num_classes

  def func_body_augmented(iteration, chosen_ids):
    # find a new facility location to add
    # based on the clustering score and the NMI score
    candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
    new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
                                                       labels, chosen_ids,
                                                       candidate_ids,
                                                       margin_multiplier,
                                                       margin_type)
    chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
    return iteration + 1, chosen_ids

  num_classes = array_ops.size(array_ops.unique(labels)[0])
  # Start with an empty set of centroids; greedily add one per iteration.
  chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
  # num_classes get determined at run time based on the sampled batch.
  iteration = array_ops.constant(0)
  # chosen_ids grows each iteration, hence the [None] shape invariant.
  _, chosen_ids = control_flow_ops.while_loop(
      func_cond_augmented,
      func_body_augmented, [iteration, chosen_ids],
      shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
          [None])])
  return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
                              labels, chosen_ids, cluster_member_ids,
                              cluster_idx, margin_multiplier, margin_type):
  """Updates the cluster medoid per cluster.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
    labels: 1-D Tensor of ground truth cluster assignment.
    chosen_ids: 1-D Tensor of cluster centroid indices.
    cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
    cluster_idx: Index of this one cluster.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    chosen_ids: Updated 1-D Tensor of cluster centroid indices.
  """

  def func_cond(iteration, scores_margin):
    del scores_margin  # Unused variable scores_margin.
    return iteration < num_candidates

  def func_body(iteration, scores_margin):
    # swap the current medoid with the candidate cluster member
    candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
    tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
    predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
    metric_score = compute_clustering_score(labels, predictions, margin_type)
    pad_before = array_ops.zeros([iteration])
    pad_after = array_ops.zeros([num_candidates - 1 - iteration])
    # Accumulate 1 - score as the structured margin for this candidate.
    return iteration + 1, scores_margin + array_ops.concat(
        [pad_before, [1.0 - metric_score], pad_after], 0)

  # pairwise_distances_subset is of size [p, 1, 1, p],
  # the intermediate dummy dimensions at
  # [1, 2] makes this code work in the edge case where p=1.
  # this happens if the cluster size is one.
  scores_fac = -1.0 * math_ops.reduce_sum(
      array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
  iteration = array_ops.constant(0)
  num_candidates = array_ops.size(cluster_member_ids)
  scores_margin = array_ops.zeros([num_candidates])
  _, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
                                                 [iteration, scores_margin])
  # Combine facility energy with the weighted structured margin and pick
  # the best member as the new medoid.
  candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
  argmax_index = math_ops.to_int32(
      math_ops.argmax(candidate_scores, axis=0))
  best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
  chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
  return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
                       margin_multiplier, margin_type):
  """Updates all cluster medoids a cluster at a time.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    predictions: 1-D Tensor of predicted cluster assignment.
    labels: 1-D Tensor of ground truth cluster assignment.
    chosen_ids: 1-D Tensor of cluster centroid indices.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.

  Returns:
    chosen_ids: Updated 1-D Tensor of cluster centroid indices.
  """

  def func_cond_augmented_pam(iteration, chosen_ids):
    del chosen_ids  # Unused argument.
    return iteration < num_classes

  def func_body_augmented_pam(iteration, chosen_ids):
    """Call the update_medoid_per_cluster subroutine."""
    # Select the points currently assigned to cluster index `iteration`.
    mask = math_ops.equal(
        math_ops.to_int64(predictions), math_ops.to_int64(iteration))
    this_cluster_ids = array_ops.where(mask)

    # Restrict the distance matrix to this cluster's rows and columns
    # (gather rows, transpose, gather again, transpose back).
    pairwise_distances_subset = array_ops.transpose(
        array_ops.gather(
            array_ops.transpose(
                array_ops.gather(pairwise_distances, this_cluster_ids)),
            this_cluster_ids))
    chosen_ids = update_medoid_per_cluster(pairwise_distances,
                                           pairwise_distances_subset, labels,
                                           chosen_ids, this_cluster_ids,
                                           iteration, margin_multiplier,
                                           margin_type)
    return iteration + 1, chosen_ids

  unique_class_ids = array_ops.unique(labels)[0]
  num_classes = array_ops.size(unique_class_ids)
  iteration = array_ops.constant(0)

  _, chosen_ids = control_flow_ops.while_loop(
      func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
  return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
                                             labels,
                                             margin_multiplier,
                                             margin_type,
                                             chosen_ids,
                                             pam_max_iter=5):
  """Refine the cluster centroids with PAM local search.

  For fixed iterations, alternate between updating the cluster assignment
  and updating cluster medoids.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.
    chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
    pam_max_iter: Number of refinement iterations.

  Returns:
    chosen_ids: Updated 1-D Tensor of cluster centroid indices.
  """
  # NOTE: this Python loop unrolls pam_max_iter refinement steps into the
  # graph; pam_max_iter must therefore be a Python int, not a tensor.
  for _ in range(pam_max_iter):
    # update the cluster assignment given the chosen_ids (S_pred)
    predictions = get_cluster_assignment(pairwise_distances, chosen_ids)

    # update the medoids per each cluster
    chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
                                    chosen_ids, margin_multiplier, margin_type)

  return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
  """Compute ground truth facility location score.

  Loop over each unique classes and compute average travel distances.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.

  Returns:
    gt_cluster_score: dtypes.float32 score.
  """
  unique_class_ids = array_ops.unique(labels)[0]
  num_classes = array_ops.size(unique_class_ids)
  iteration = array_ops.constant(0)
  gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)

  def func_cond(iteration, gt_cluster_score):
    del gt_cluster_score  # Unused argument.
    return iteration < num_classes

  def func_body(iteration, gt_cluster_score):
    """Per each cluster, compute the average travel distance."""
    mask = math_ops.equal(labels, unique_class_ids[iteration])
    this_cluster_ids = array_ops.where(mask)
    # Distance sub-matrix restricted to the members of this class.
    pairwise_distances_subset = array_ops.transpose(
        array_ops.gather(
            array_ops.transpose(
                array_ops.gather(pairwise_distances, this_cluster_ids)),
            this_cluster_ids))
    # Negated best single-medoid cost: the smallest column sum is the total
    # distance to the best in-cluster facility.
    this_cluster_score = -1.0 * math_ops.reduce_min(
        math_ops.reduce_sum(
            pairwise_distances_subset, axis=0))
    return iteration + 1, gt_cluster_score + this_cluster_score

  _, gt_cluster_score = control_flow_ops.while_loop(
      func_cond, func_body, [iteration, gt_cluster_score])
  return gt_cluster_score
def cluster_loss(labels,
                 embeddings,
                 margin_multiplier,
                 enable_pam_finetuning=True,
                 margin_type='nmi',
                 print_losses=False):
  """Computes the clustering loss.

  The following structured margins are supported:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted random index
    vmeasure: v-measure
    const: indicator checking whether the two clusterings are the same.

  Args:
    labels: 2-D Tensor of labels of shape [batch size, 1]
    embeddings: 2-D Tensor of embeddings of shape
      [batch size, embedding dimension]. Embeddings should be l2 normalized.
    margin_multiplier: float32 scalar. multiplier on the structured margin term
      See section 3.2 of paper for discussion.
    enable_pam_finetuning: Boolean, Whether to run local pam refinement.
      See section 3.4 of paper for discussion.
    margin_type: Type of structured margin to use. See section 3.2 of
      paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
    print_losses: Boolean. Option to print the loss.

  Paper: https://arxiv.org/abs/1612.01213.

  Returns:
    clustering_loss: A float32 scalar `Tensor`.
  Raises:
    ImportError: If sklearn dependency is not installed.
  """
  if not HAS_SKLEARN:
    raise ImportError('Cluster loss depends on sklearn.')
  pairwise_distances = pairwise_distance(embeddings)
  labels = array_ops.squeeze(labels)
  all_ids = math_ops.range(array_ops.shape(embeddings)[0])

  # Compute the loss augmented inference and get the cluster centroids.
  chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
                                                    all_ids, margin_multiplier,
                                                    margin_type)
  # Given the predicted centroids, compute the clustering score.
  score_pred = compute_facility_energy(pairwise_distances, chosen_ids)

  # Branch whether to use PAM finetuning.
  if enable_pam_finetuning:
    # Initialize with augmented facility solution.
    chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
                                                          labels,
                                                          margin_multiplier,
                                                          margin_type,
                                                          chosen_ids)
    # Re-score with the refined centroids.
    score_pred = compute_facility_energy(pairwise_distances, chosen_ids)

  # Given the predicted centroids, compute the cluster assignments.
  predictions = get_cluster_assignment(pairwise_distances, chosen_ids)

  # Compute the clustering (i.e. NMI) score between the two assignments.
  clustering_score_pred = compute_clustering_score(labels, predictions,
                                                   margin_type)

  # Compute the clustering score from labels.
  score_gt = compute_gt_cluster_score(pairwise_distances, labels)

  # Compute the hinge loss: predicted score plus margin must not exceed the
  # ground-truth score, otherwise the gap is the loss.
  clustering_loss = math_ops.maximum(
      score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
      0.0,
      name='clustering_loss')
  clustering_loss.set_shape([])

  if print_losses:
    clustering_loss = logging_ops.Print(
        clustering_loss,
        ['clustering_loss: ', clustering_loss, array_ops.shape(
            clustering_loss)])

  # Clustering specific summary.
  summary.scalar('losses/score_pred', score_pred)
  summary.scalar('losses/' + margin_type, clustering_score_pred)
  summary.scalar('losses/score_gt', score_gt)
  return clustering_loss
| apache-2.0 |
tangyouze/tushare | tushare/datayes/future.py | 17 | 1740 | # -*- coding:utf-8 -*-
"""
通联数据
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Future():
    # Thin wrapper around the DataYes (tongliandata) futures API endpoints.

    def __init__(self, client=None):
        # Reuse an already-authenticated client when given one; otherwise
        # build a new one from the locally stored API token.
        if client is None:
            self.client = Client(up.get_token())
        else:
            self.client = client

    def Futu(self, exchangeCD='', secID='', ticker='', contractObject='', field=''):
        """Fetch basic contract information for futures listed on the four
        major domestic (Chinese) futures exchanges, including contract name,
        code, type, underlying, quote unit, tick size, daily price-limit
        range, trading currency, contract multiplier, margin requirement,
        listing date, last trading day, delivery date, delivery method,
        trading/delivery fees, listing base price and contract status.
        """
        code, result = self.client.getData(vs.FUTU%(exchangeCD, secID, ticker, contractObject, field))
        return _ret_data(code, result)

    def FutuConvf(self, secID='', ticker='', field=''):
        """Fetch conversion-factor information for treasury-bond futures,
        including the deliverable bond name, its trading code and the
        conversion factor.
        """
        code, result = self.client.getData(vs.FUTUCONVF%(secID, ticker, field))
        return _ret_data(code, result)
def _ret_data(code, result):
    """Turn a raw (status code, payload) API response into a DataFrame.

    On any non-200 status the raw payload is printed and None is returned.
    """
    if code != 200:
        print(result)
        return None
    payload = result.decode('utf-8') if vs.PY3 else result
    return pd.read_csv(StringIO(payload))
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
    """Isomap Embedding

    Non-linear dimensionality reduction through Isometric Mapping

    Read more in the :ref:`User Guide <isomap>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold

    eigen_solver : ['auto'|'arpack'|'dense']
        'auto' : Attempt to choose the most efficient solver
        for the given problem.

        'arpack' : Use Arnoldi decomposition to find the eigenvalues
        and eigenvectors.

        'dense' : Use a direct solver (i.e. LAPACK)
        for the eigenvalue decomposition.

    tol : float
        Convergence tolerance passed to arpack or lobpcg.
        not used if eigen_solver == 'dense'.

    max_iter : integer
        Maximum number of iterations for the arpack solver.
        not used if eigen_solver == 'dense'.

    path_method : string ['auto'|'FW'|'D']
        Method to use in finding shortest path.

        'auto' : attempt to choose the best algorithm automatically.

        'FW' : Floyd-Warshall algorithm.

        'D' : Dijkstra's algorithm.

    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        Algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kernel_pca_ : object
        `KernelPCA` object used to implement the embedding.

    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.

    nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    dist_matrix_ : array-like, shape (n_samples, n_samples)
        Stores the geodesic distance matrix of training data.

    References
    ----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
           framework for nonlinear dimensionality reduction. Science 290 (5500)
    """

    def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
                 tol=0, max_iter=None, path_method='auto',
                 neighbors_algorithm='auto'):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
                                      algorithm=neighbors_algorithm)

    def _fit_transform(self, X):
        # Fit the nearest-neighbors index and keep a reference to the
        # (validated) training data for later use in transform().
        X = check_array(X)
        self.nbrs_.fit(X)
        self.training_data_ = self.nbrs_._fit_X
        self.kernel_pca_ = KernelPCA(n_components=self.n_components,
                                     kernel="precomputed",
                                     eigen_solver=self.eigen_solver,
                                     tol=self.tol, max_iter=self.max_iter)

        # Geodesic distances = shortest paths through the weighted k-NN graph.
        kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
                               mode='distance')

        self.dist_matrix_ = graph_shortest_path(kng,
                                                method=self.path_method,
                                                directed=False)
        # Isomap kernel: -0.5 * D^2 (double-centering happens inside
        # KernelPCA), then embed via kernel PCA.
        G = self.dist_matrix_ ** 2
        G *= -0.5

        self.embedding_ = self.kernel_pca_.fit_transform(G)

    def reconstruction_error(self):
        """Compute the reconstruction error for the embedding.

        Returns
        -------
        reconstruction_error : float

        Notes
        -------
        The cost function of an isomap embedding is

        ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``

        Where D is the matrix of distances for the input data X,
        D_fit is the matrix of distances for the output embedding X_fit,
        and K is the isomap kernel:

        ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
        """
        G = -0.5 * self.dist_matrix_ ** 2
        G_center = KernelCenterer().fit_transform(G)
        evals = self.kernel_pca_.lambdas_
        return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, precomputed tree, or NearestNeighbors
            object.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """Transform X.

        This is implemented by linking the points X into the graph of geodesic
        distances of the training data. First the `n_neighbors` nearest
        neighbors of X are found in the training data, and from these the
        shortest geodesic distances from each point in X to each point in
        the training data are computed in order to construct the kernel.
        The embedding of X is the projection of this kernel onto the
        embedding vectors of the training set.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)

        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory.  To avoid that, use a loop:
        G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
        for i in range(X.shape[0]):
            G_X[i] = np.min((self.dist_matrix_[indices[i]]
                             + distances[i][:, None]), 0)

        # Apply the same -0.5 * D^2 kernel transform used at fit time.
        G_X **= 2
        G_X *= -0.5

        return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
vishwa91/OptSys | examples/objective.py | 1 | 1478 | #!/usr/bin/env python3
import os, sys
sys.path.append('../modules')
import numpy as np
import matplotlib.pyplot as plt
import raytracing as rt
import visualize as vis
import ray_utilities
if __name__ == '__main__':
    # Create a relay lens system
    components = []
    rays = []

    image_plane = -300  # x-coordinate of the object plane the ray fans start at
    nrays = 10          # rays per fan

    # Objective is simulated using two lenses
    components.append(rt.Lens(f=30,
                              aperture=100,
                              pos=[0,0],
                              theta=0))

    # Second lens creates the flange focal distance
    components.append(rt.Lens(f=13,
                              aperture=50,
                              pos=[20,0],
                              theta=0))

    # Create three points and three rays from each point
    rays += ray_utilities.ray_fan([image_plane, 200], [-np.pi/5, -np.pi/6], nrays)
    rays += ray_utilities.ray_fan([image_plane, 0], [-np.pi/30, np.pi/30], nrays)
    rays += ray_utilities.ray_fan([image_plane, -200], [np.pi/6, np.pi/5], nrays)

    # One color per source point: red / green / blue fans.
    colors = 'r'*nrays + 'g'*nrays + 'b'*nrays

    # Propagate the rays
    ray_bundles = rt.propagate_rays(components, rays)

    # Create a new canvas
    canvas = vis.Canvas([-300, 100], [-200, 200])

    # Draw the components
    canvas.draw_components(components)

    # Draw the rays
    canvas.draw_rays(ray_bundles, colors)

    # Show the system
    canvas.show()

    # Save a copy
    canvas.save('objective.png')
| mit |
coreyabshire/stacko | src/competition_utilities.py | 1 | 5336 | from __future__ import division
from collections import Counter
import csv
import dateutil
from datetime import datetime
from dateutil.relativedelta import relativedelta
import numpy as np
import os
import pandas as pd
import pymongo
data_path = "C:/Projects/ML/stacko/data2"
submissions_path = data_path
if not data_path or not submissions_path:
raise Exception("Set the data and submission paths in competition_utilities.py!")
def parse_date_maybe_null(date):
    """Parse a date string, returning None for empty or missing values."""
    return dateutil.parser.parse(date) if date else None
# Column converters applied when pandas loads the competition CSVs.
df_converters = {"PostCreationDate": dateutil.parser.parse,
                 "OwnerCreationDate": dateutil.parser.parse}
#                "PostClosedDate": parse_date_maybe_null}
# Category labels, ordered by their relative frequency in the provided
# training set.  Using numeric ids in this order has the convenience that
# open-vs-closed becomes 0-vs-nonzero; the text can always be looked up
# here later.  `sorted_labels` is the same set in alphabetical order.
labels = ['open',
          'not a real question',
          'off topic',
          'not constructive',
          'too localized']
sorted_labels = sorted(labels)
def get_reader(file_name="train-sample.csv"):
    """Return a csv.reader over *file_name* (in data_path), positioned
    just past the header row."""
    reader = csv.reader(open(os.path.join(data_path, file_name)))
    # Skip the header row.  The builtin next() works on Python 2.6+ and 3,
    # unlike the Python-2-only reader.next() used previously.
    next(reader)
    return reader
def get_header(file_name="train-sample.csv"):
    """Return the header row (list of column names) of *file_name*."""
    reader = csv.reader(open(os.path.join(data_path, file_name)))
    # next() instead of the Python-2-only reader.next() for 2/3 portability.
    return next(reader)
def get_closed_count(file_name):
    """Count the questions in *file_name* whose OpenStatus is not 'open'."""
    return sum(1 for q in iter_closed_questions(file_name))

def iter_closed_questions(file_name):
    """Yield closed questions (rows with OpenStatus != 'open') as pandas
    Series, streaming the CSV in chunks of 1000 rows to bound memory."""
    df_iter = pd.io.parsers.read_csv(os.path.join(data_path, file_name), iterator=True, chunksize=1000)
    return (question[1] for df in df_iter for question in df[df["OpenStatus"] != "open"].iterrows())

def iter_open_questions(file_name):
    """Yield open questions (rows with OpenStatus == 'open') as pandas
    Series, streaming the CSV in chunks of 1000 rows to bound memory."""
    df_iter = pd.io.parsers.read_csv(os.path.join(data_path, file_name), iterator=True, chunksize=1000)
    return (question[1] for df in df_iter for question in df[df["OpenStatus"] == "open"].iterrows())

def get_dataframe(file_name="train-sample.csv"):
    """Load *file_name* from data_path into a DataFrame, applying the
    date-parsing converters in df_converters."""
    return pd.io.parsers.read_csv(os.path.join(data_path, file_name), converters = df_converters)
def compute_priors(closed_reasons):
    """Return the empirical class priors for *closed_reasons*.

    The priors are ordered by alphabetically sorted reason name.
    """
    counts = Counter(closed_reasons)
    total = len(closed_reasons)
    return [counts[reason] / total for reason in sorted(counts)]
def get_priors(file_name):
    """Compute class priors from column 14 (the status column) of *file_name*."""
    return compute_priors([r[14] for r in get_reader(file_name)])

def write_sample(file_name, header, sample):
    """Write *header* followed by the rows in *sample* as a CSV in data_path."""
    writer = csv.writer(open(os.path.join(data_path, file_name), "w"), lineterminator="\n")
    writer.writerow(header)
    writer.writerows(sample)
def update_prior(old_prior, old_posterior, new_prior):
    """Re-weight a posterior probability for a changed class prior.

    The likelihood (odds) ratio implied by the pair (old_prior,
    old_posterior) is held fixed while the prior is replaced by new_prior.
    """
    odds_factor = (old_prior * (1 - old_posterior)) / (old_posterior * (1 - old_prior))
    return new_prior / (new_prior + (1 - new_prior) * odds_factor)
def cap_and_update_priors(old_priors, old_posteriors, new_priors, epsilon):
    """Vectorized prior swap: cap posteriors into [epsilon, 1-epsilon],
    re-weight each row for the new priors, then cap and renormalize again."""
    old_posteriors = cap_predictions(old_posteriors, epsilon)
    # Tile the 1-D prior vectors to one row per prediction row.
    old_priors = np.kron(np.ones((np.size(old_posteriors, 0), 1)), old_priors)
    new_priors = np.kron(np.ones((np.size(old_posteriors, 0), 1)), new_priors)
    # Same odds-ratio identity as update_prior, applied elementwise.
    evidence_ratio = (old_priors*(1-old_posteriors)) / (old_posteriors*(1-old_priors))
    new_posteriors = new_priors / (new_priors + (1-new_priors)*evidence_ratio)
    new_posteriors = cap_predictions(new_posteriors, epsilon)
    return new_posteriors
def cap_predictions(probs, epsilon):
    """Clamp probabilities into [epsilon, 1 - epsilon] (mutating *probs*
    in place) and return a row-normalized copy."""
    np.clip(probs, epsilon, 1 - epsilon, out=probs)
    totals = probs.sum(axis=1)
    return probs / totals[:, np.newaxis]
def write_submission(file_name, predictions):
    """Write prediction rows to a CSV file in submissions_path."""
    with open(os.path.join(submissions_path, file_name), "w") as outfile:
        writer = csv.writer(outfile, lineterminator="\n")
        writer.writerows(predictions)

def get_submission_reader(file_name="submission.csv"):
    """Return a csv.reader over a submission file in data_path."""
    reader = csv.reader(open(os.path.join(data_path, file_name)))
    return reader

def get_dataframe_mongo_query(query):
    """Run *query* against the MongoDB 'train' collection, return a DataFrame."""
    db = pymongo.Connection().stacko
    return pd.DataFrame([r for r in db.train.find(query)])

def get_sample_data_frame(limit=0):
    """Load up to *limit* documents (0 = all) from the 'sample2' collection."""
    db = pymongo.Connection().stacko
    return pd.DataFrame([r for r in db.sample2.find(limit=limit)])

def get_sample_data_frame_by_status(status, limit=0):
    """Load sample documents having the given OpenStatus value."""
    db = pymongo.Connection().stacko
    return pd.DataFrame([r for r in db.sample2.find(
        {"OpenStatus": status}, limit=limit)])

def get_test_data_frame(limit=0):
    """Load up to *limit* documents (0 = all) from the 'test2' collection."""
    db = pymongo.Connection().stacko
    return pd.DataFrame([r for r in db.test2.find(limit=limit)])

def get_dataframe_mongo_date_range(start, end):
    """Load training documents with PostCreationDate in [start, end)."""
    query = {'PostCreationDate': {'$gte': start, '$lt': end}}
    return get_dataframe_mongo_query(query)

def get_dataframe_mongo_months(year, month, count):
    """Load training documents covering *count* whole months from year/month."""
    start = datetime(year, month, 1)
    relative = relativedelta(months = count)
    end = start + relative
    return get_dataframe_mongo_date_range(start, end)

def load_priors(name='train.csv'):
    """Fetch the stored priors for *name* from the 'priors' collection."""
    return pymongo.Connection().stacko.priors.find_one({'for':name})['priors']
| bsd-2-clause |
madjelan/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # Test LinearRegression on a simple dataset.
    # a simple dataset
    X = [[1], [2]]
    Y = [1, 2]

    clf = LinearRegression()
    clf.fit(X, Y)

    # y = x is fit exactly: slope 1, intercept 0.
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [1, 2])

    # test it also for degenerate input
    X = [[1]]
    Y = [0]

    clf = LinearRegression()
    clf.fit(X, Y)
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [0])


def test_fit_intercept():
    # Test assertions on betas shape.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
    lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)

    # The shape of coef_ must not depend on whether an intercept is fitted,
    # and must be consistent across feature counts.
    assert_equal(lr2_with_intercept.coef_.shape,
                 lr2_without_intercept.coef_.shape)
    assert_equal(lr3_with_intercept.coef_.shape,
                 lr3_without_intercept.coef_.shape)
    assert_equal(lr2_without_intercept.coef_.ndim,
                 lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    random_state = check_random_state(random_state)
    for i in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        # With an identity design matrix the fit is exact and leaves no
        # residual.
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)


def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)

    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    clf = LinearRegression(fit_intercept=True)
    clf.fit((X), Y)
    assert_equal(clf.coef_.shape, (2, n_features))
    Y_pred = clf.predict(X)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Stacking the same target twice must reproduce the single-target fit.
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)


def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    # Same duplicated-target consistency check as the dense case.
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    # Check center_data for all fit_intercept/normalize combinations.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # No intercept: data must pass through untouched.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # Intercept only: columns and target are mean-centered.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # Intercept + normalize: columns also divided by their scale.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)


def test_center_data_multioutput():
    # Same checks for a 2-D target, on both dense and sparse paths.
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)

        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)


def test_center_data_weighted():
    # Weighted centering: means are sample_weight-weighted averages.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data must not densify X: Xt stays equal to X even when
    # an intercept is fitted (only y is centered).
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))

    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))


def test_csr_sparse_center_data():
    # Test output format of sparse_center_data, when input is csr
    X, y = make_regression()
    X[X < 2.5] = 0.0
    csr = sparse.csr_matrix(X)
    csr_, y, _, _, _ = sparse_center_data(csr, y, True)
    assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
akshayka/edxclassify | edxclassify/classifiers/clf_util.py | 1 | 3557 | import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn import metrics
from sklearn.externals import joblib
import skll
def load_clf(pkl_file):
    """Load a joblib-dumped (data_cleaner, trained classifier) pair."""
    data_cleaner, clf = joblib.load(pkl_file)
    return data_cleaner, clf
def extract_feature_names(feature_union):
    """Collect feature names from every sub-pipeline of a FeatureUnion.

    Each feature name is prefixed with its pipeline's name, giving
    "<pipeline> <feature>" strings, returned as a numpy array.

    Raises:
        AttributeError: if the final step of any sub-pipeline does not
            implement get_feature_names.
    """
    collected = []
    for step_name, pipeline in feature_union.transformer_list:
        vectorizer = pipeline.steps[-1][-1]
        if not hasattr(vectorizer, 'get_feature_names'):
            raise AttributeError("Dictionary %s does not provide "
                                 "get_feature_names." % str(step_name))
        collected.extend([step_name + ' ' + f
                          for f in vectorizer.get_feature_names()])
    return np.asarray(collected)
def _fold_scores(y_true, y_pred):
    """Return (precision, recall, f1, kappa) for one fold's predictions.

    precision/recall/f1 are per-class arrays (average=None); kappa is the
    single Kappa coefficient computed by skll.
    """
    return (metrics.precision_score(y_true, y_pred, average=None),
            metrics.recall_score(y_true, y_pred, average=None),
            metrics.f1_score(y_true, y_pred, average=None),
            skll.metrics.kappa(y_true, y_pred))


def sklearn_cv(clf, X, y):
    """Evaluate training and test set error using stratified K-fold
    cross validation.

    parameters:
    ----------
    clf - a scikit-learn pipelined estimator.
    X - a list of feature vectors
    y - a list of labels, with y[i] the label for X[i]

    returns:
    --------
    train_error_metrics: A list of four lists [p, r, f, K], each of length
                         10 (one entry per fold):
                         p[i][j] is the precision for class j in fold i;
                         r[i][j] is the recall for class j in fold i;
                         f[i][j] is the f1 for class j in fold i;
                         K[i] is the Kappa coefficient for fold i.
    test_error_metrics:  Like train_error_metrics, but for the test-set
                         error.
    """
    X, y = np.array(X), np.array(y)
    skf = StratifiedKFold(y, n_folds=10)
    # Parallel accumulators: (precision, recall, f1, kappa) per fold.
    train_scores = ([], [], [], [])
    test_scores = ([], [], [], [])
    for train_indices, test_indices in skf:
        # Parenthesized print works under both Python 2 and Python 3.
        print('cross_validating ...')
        # Partition the dataset, as per the fold partitioning.
        X_train, X_test = X[train_indices], X[test_indices]
        y_train, y_test = y[train_indices], y[test_indices]
        # Train the classifier on this fold's training partition.
        clf.fit(X_train, y_train)
        # Score the train and test partitions with the same metric set.
        for acc, value in zip(train_scores,
                              _fold_scores(y_train, clf.predict(X_train))):
            acc.append(value)
        for acc, value in zip(test_scores,
                              _fold_scores(y_test, clf.predict(X_test))):
            acc.append(value)
    return list(train_scores), list(test_scores)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.