repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
plazas/wfirst-detectors-vnl | code/bias_nonlinearity_vs_beta_version2_vs_magnitude.py | 1 | 27257 | #!/usr/bin/python
import numpy as np
import os
import sys
import math
import matplotlib
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.font_manager as fm
## 7-20-15
## Simple code to explore NL as a function of beta, by using interleaving method
import galsim
import galsim.wfirst as wfirst
filters = wfirst.getBandpasses (AB_zeropoint=True)
import logging
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("tests_hsm_interleaving")
f=lambda x,beta : x - beta*x*x
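## --- Illustrative check (added note; not part of the original script) ---
## Hedged sanity check of the quadratic NL model defined above: with the
## nominal beta = 3.566e-7 used later in this script, a pixel holding 1e4
## counts is suppressed by beta*x ~ 0.36% (illustrative numbers only):
##   f(1e4, 3.566e-7)          -> 9964.34
##   1. - f(1e4, 3.566e-7)/1e4 -> 3.566e-03
assert abs(f(1e4, 3.566e-7) - 9964.34) < 1e-9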
from galsim.cdmodel import *
from sim2 import * ## where all the BF stuff is
from scipy import optimize
def measurement_function_NL (profile, e1_inter_vec=[], e2_inter_vec=[], size_inter_vec=[], noise=None, beta=3.566e-7, string='', type='nl'):
print " "
print "n: ", n
logger.info (string)
print "beta: ", beta
#profile_before=profile.withScaledFlux(n*n)
"""
#### Calculate moments without effect
print "FLUX: ", profile.getFlux()
image=profile.drawImage(image=galsim.Image(base_size, base_size), scale=pixel_scale/n, method='no_pixel')
print np.amax(image.array), np.sum(image.array)
if not noise == None:
read_noise = galsim.GaussianNoise(sigma=noise/(n**2))
image.addNoise(read_noise)
results=image.FindAdaptiveMom(hsmparams=new_params)
ref_e1=results.observed_shape.e1
ref_e2=results.observed_shape.e2
ref_s=results.moments_sigma
print "Image shape, before interleave: ", image.array.shape
##Create fits image in disk
file_name="object_before_interleaving.fits"
image.write(file_name)
print "ref_e1, ref_e2, ref_s", ref_e1, ref_e2, ref_s
#### Calculate moments with the effect
image=profile.drawImage(image=galsim.Image(base_size, base_size), scale=pixel_scale/n, method='no_pixel')
print np.amax(image.array), np.sum(image.array)
if not noise == None:
read_noise = galsim.GaussianNoise(sigma=noise/(n**2))
image.addNoise(read_noise)
image*=n*n
image.applyNonlinearity(f,beta)
results=image.FindAdaptiveMom(hsmparams=new_params)
print "results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma ", results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma
print "Differences: ", results.observed_shape.e1 - ref_e1, results.observed_shape.e2 - ref_e2, (results.moments_sigma - ref_s) / ref_s
"""
## Interleave the profiles with NO EFFECT
im_list=[]
offsets_list=[]
#create list of images to interleave-no effect
for j in xrange(n):
for i in xrange(n):
im=galsim.Image(base_size, base_size)
offset=galsim.PositionD(-(i+0.5)/n+0.5, -(j+0.5)/n+0.5)
offsets_list.append(offset)
#print "Offset: ", offset
profile.drawImage(image=im, scale=pixel_scale, offset=offset, method='no_pixel')
im_list.append(im)
image=galsim.utilities.interleaveImages(im_list=im_list, N=(n,n), offsets=offsets_list, add_flux=True)
print "Image shape, after interleave, no effect: ", image.array.shape
    if noise is not None:
read_noise = galsim.GaussianNoise(sigma=noise)
image.addNoise(read_noise)
results=image.FindAdaptiveMom(hsmparams=new_params)
ref_e1=results.observed_shape.e1
ref_e2=results.observed_shape.e2
ref_s=results.moments_sigma
print "ref_e1, ref_e2, ref_s", ref_e1, ref_e2, ref_s
file_name="object_after_interleaving_n_%g_no_effect.fits" %n
image.write(file_name)
## Interleave the profiles with the effect
im_list=[]
offsets_list=[]
    #create list of images to interleave - with the effect
for j in xrange(n):
for i in xrange(n):
im=galsim.Image(base_size, base_size)
offset=galsim.PositionD(-(i+0.5)/n+0.5, -(j+0.5)/n+0.5)
offsets_list.append(offset)
#print "Offset: ", offset
profile.drawImage(image=im, scale=pixel_scale, offset=offset, method='no_pixel')
if type == 'bf':
#cd = PowerLawCD(5, 5.e-7, 5.e-7, 1.5e-7, 1.5e-7, 2.5e-7, 2.5e-7, 1.3)
cd = BaseCDModel (aL,aR,aB,aT)
im=cd.applyForward(im)
elif type == 'nl':
#print "Skipping NL"
im.applyNonlinearity(f,beta)
else:
print "wrong type: 'bf' or 'nl' "
sys.exit(1)
im_list.append(im)
image2=galsim.utilities.interleaveImages(im_list=im_list, N=(n,n), offsets=offsets_list, add_flux=True)
print "Image shape, after interleave: ", image2.array.shape
    if noise is not None:
read_noise = galsim.GaussianNoise(sigma=noise)
image2.addNoise(read_noise)
results=image2.FindAdaptiveMom(hsmparams=new_params)
e1_inter_vec.append (results.observed_shape.e1 - ref_e1)
e2_inter_vec.append (results.observed_shape.e2 - ref_e2)
size_inter_vec.append ( (results.moments_sigma - ref_s) / ref_s)
print "results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma ", results.observed_shape.e1, results.observed_shape.e2, results.moments_sigma
print "Differences: ", results.observed_shape.e1 - ref_e1, results.observed_shape.e2 - ref_e2, (results.moments_sigma - ref_s) / ref_s
file_name="object_after_interleaving_n_%g_effect.fits" %n
image2.write(file_name)
##Take the difference of the images
diff=image-image2
print "diff: ", diff
file_name="diff_object_interleaving_n_%g.fits" %n
diff.write(file_name)
### Parameters
k=1024
base_size=1*k + 1## ??
n=7
m_zero=20 # 24
#m_gal=20
#gal_flux=6e4*2.521**(m_zero-m_gal)
gal_sigma=0.1
print "gal_sigma", gal_sigma
pixel_scale=0.11
noise=20
#e=0.0
type='nl'
#if type == 'bf':
# (aL,aR,aB,aT) = readmeanmatrices()
#lam = 1380. # NB: don't use lambda - that's a reserved word.
tel_diam = 2.4
obscuration_optical=0.3
#lam_over_diam = lam * 1.e-9 / tel_diam * galsim.radians
#lam_over_diam = lam_over_diam / galsim.arcsec
#print "lam_over_diam", lam_over_diam
#Define wavelengths, ellipticities, and magnitudes
wavelength_dict={'Z087':0.869,'Y106':1.060, 'J129':1.293, 'W149':1.485, 'H158':1.577, 'F184':1.842} # in microns
#wavelength_dict={'Y106':1.060, 'H158':1.577}
flux_dict={'Z087':8.57192e4,'Y106':8.68883e4, 'J129':8.76046e4, 'W149':2.68738e4, 'H158':8.81631e4, 'F184':6.08258e4}
#e_vec=[ (0., 0.), (0.05, 0.), (0., 0.05), (0.05, 0.05) ]#, (0., 0.075), (0.075, 0.), (0.075, 0.075)] #, 0.05, 0.06, 0.07, 0.08]
#e_vec= [(-0.05, 0.), (-0.025, 0.), (0.0, 0.0), (0.05, 0.), (0.025, 0.), (0.0, -0.05), (0.0, -0.025), (0.0, 0.025), (0.0, 0.05)]
#e_vec= [ (-0.05, 0.), (-0.025, 0.), (0.0, 0.0), (0.025, 0.), (0.05, 0.) ]
e_vec=[(0.0, 0.0)]
new_params = galsim.hsm.HSMParams(max_amoment=60000000, max_mom2_iter=10000000, max_moment_nsig2=10000)
big_fft_params = galsim.GSParams(maximum_fft_size=4096)
m_gal_vec= [18, 19, 20, 21, 22,23,24]
beta0=3.566e-7
beta_vec=[ 1.5*beta0, 0.5*beta0, beta0, 1.5*beta0, 2*beta0, 5*beta0]
#print beta_vec
#sys.exit()
#vectors that will hold the output to plot
gauss_no_noise={}
optical_no_noise={}
gauss_noise={}
optical_noise={}
for lam in wavelength_dict:
gauss_no_noise[lam]={} #\Delta e1, \Delta e2, \Delta R/R
optical_no_noise[lam]={}
gauss_noise[lam]={}
optical_noise[lam]={}
#for e in e_vec:
# gauss_no_noise[lam][e]=[[],[],[]] #\Delta e1, \Delta e2, \Delta R/R
# optical_no_noise[lam][e]=[[],[],[]]
# gauss_noise[lam][e]=[[],[],[]]
# optical_noise[lam][e]=[[],[],[]]
for m_gal in m_gal_vec:
gauss_no_noise[lam][m_gal]=[[],[],[]] #\Delta e1, \Delta e2, \Delta R/R
optical_no_noise[lam][m_gal]=[[],[],[]]
gauss_noise[lam][m_gal]=[[],[],[]]
optical_noise[lam][m_gal]=[[],[],[]]
#for e in e_vec:
# gauss_no_noise[e]=[[],[],[]]
# optical_no_noise[e]=[[],[],[]]
# gauss_noise[e]=[[],[],[]]
# optical_noise[e]=[[],[],[]]
#for m_gal in m_gal_vec:
# gauss_no_noise[m_gal]=[[],[],[]]
# optical_no_noise[m_gal]=[[],[],[]]
# gauss_noise[m_gal]=[[],[],[]]
# optical_noise[m_gal]=[[],[],[]]
#for e in [e1_true]: ### Just one value of e1=0.01. Not really a nested loop.
for lam in wavelength_dict:
lam_over_diam = wavelength_dict[lam] * 1.e-6 / tel_diam * galsim.radians
lam_over_diam = lam_over_diam / galsim.arcsec
for e in e_vec:
for m_gal in m_gal_vec:
for beta in beta_vec:
logger.info(" ")
logger.info("ellipticity: (%g, %g)", e[0], e[1] )
logger.info("lambda: %s microns", wavelength_dict[lam])
logger.info("beta: %g", beta)
logger.info("magnitude: %g", m_gal)
# Gaussian
# no noise
#logger.info("First loop: gaussian, no noise")
gal_flux=flux_dict[lam]*2.512**(m_zero-m_gal)
#gal= galsim.Convolve (galsim.Gaussian(flux=gal_flux, sigma=gal_sigma).shear(galsim.Shear(e1=e[0],e2=e[1])) , galsim.Pixel(pixel_scale), gsparams=big_fft_params )
#gal.shift (0.5, 0.5)
#measurement_function_NL (gal, e1_inter_vec=gauss_no_noise[lam][m_gal][0], e2_inter_vec=gauss_no_noise[lam][m_gal][1], size_inter_vec=gauss_no_noise[lam][m_gal][2], noise=None, beta=beta, string='Gausian, no noise')
#sys.exit(1)
###### noise
#measurement_function_NL (gal, e1_inter_vec=gauss_noise[m_gal][0], e2_inter_vec=gauss_noise[m_gal][1], size_inter_vec=gauss_noise[m_gal][2], noise=noise, beta=beta, string='Gaussian, noise')
#######################Optical
#logger.info("Third loop: Optical, no noise")
gal=galsim.Convolve (galsim.OpticalPSF(lam_over_diam, obscuration=obscuration_optical, flux=gal_flux).shear(galsim.Shear(e1=e[0],e2=e[1])), galsim.Pixel(pixel_scale), gsparams=big_fft_params )
#gal.shift (0.5, 0.5)
measurement_function_NL (gal, e1_inter_vec=optical_no_noise[lam][m_gal][0], e2_inter_vec=optical_no_noise[lam][m_gal][1], size_inter_vec=optical_no_noise[lam][m_gal][2], noise=None, beta=beta, string='Optical, no noise')
sys.exit(1)
###### noise
#measurement_function_NL (gal, e1_inter_vec=optical_noise[m_gal][0], e2_inter_vec=optical_noise[m_gal][1], size_inter_vec=optical_noise[m_gal][2], noise=noise, beta=beta, string='Optical, noise')
#########################WFIRST
#gal=wfirst.getPSF(SCAs=7,approximate_struts=True, wavelength=filters['W149'])[7].shear(galsim.Shear(e1=e, e2=e))
#measurement_function_NL (gal, true_e1=e, true_e2=e, true_s=0., e1_vec=w_e1, e2_vec=w_e2, size_vec=w_s, e1_inter_vec=wi_e1, e2_inter_vec=wi_e2, size_inter_vec=wi_s, noise=None, string='WFIRST, no noise')
# noise
#measurement_function_NL (gal, true_e1=e, true_e2=e, true_s=0., e1_vec=nw_e1, e2_vec=nw_e2, size_vec=nw_s, e1_inter_vec=nwi_e1, e2_inter_vec=nwi_e2, size_inter_vec=nwi_s, noise=noise, string='WFIRST, noise')
#factor_vec=xrange(1,11)
#for e in [e_vec[1]]:
# for factor in factor_vec:
pp=PdfPages("test_bias_NL_vs_beta.pdf")
print "Name of output PDF file: test_bias_NL_vs_beta.pdf"
#### PLOTS
#### Do the plotting here
plt.minorticks_on()
#plt.tight_layout()
### We do not have matplotlib 1.1, with the 'style' package. Modify the matplotlibrc file parameters instead
import matplotlib as mpl
mpl.rc('lines', linewidth=1, color='black', linestyle='-')
mpl.rc('font', family='serif',weight='normal', size=10.0 )
mpl.rc('text', color='black', usetex=False)
mpl.rc('axes', edgecolor='black', linewidth=1, grid=False, titlesize=9, labelsize=11, labelweight='normal',labelcolor='black')
mpl.rc('axes.formatter', limits=[-4,4])
mpl.rcParams['xtick.major.size']=7
mpl.rcParams['xtick.minor.size']=4
mpl.rcParams['xtick.major.pad']=8
mpl.rcParams['xtick.minor.pad']=8
mpl.rcParams['xtick.labelsize']= '12'
mpl.rcParams['xtick.minor.width']= 1.0
mpl.rcParams['xtick.major.width']= 1.0
mpl.rcParams['ytick.major.size']=7
mpl.rcParams['ytick.minor.size']=4
mpl.rcParams['ytick.major.pad']=8
mpl.rcParams['ytick.minor.pad']=8
mpl.rcParams['ytick.labelsize']= '12'
mpl.rcParams['ytick.minor.width']= 1.0
mpl.rcParams['ytick.major.width']= 1.0
mpl.rc ('legend', numpoints=1, fontsize='12', shadow=False, frameon=False)
## Plot parameters
plt.subplots_adjust(hspace=0.01, wspace=0.01)
prop = fm.FontProperties(size=11)
marker_size=7
loc_label = "upper right"
visible_x, visible_y = True, True
grid=False
ymin, ymax = -0.0001, 0.0001
m_req=1e-3
c_req=1e-4
color_vec=['r', 'y', 'g', 'c', 'b', 'm', 'k']
#color_dict={0.0:'r', 0.025:'k', 0.05:'b', 0.075:'m', 0.08:'c', 0.1:'g'}
color_dict_e={}
for i,e in enumerate(e_vec):
color_dict_e[e]=color_vec[i%len(color_vec)]
color_dict_m={}
for i,m_gal in enumerate(m_gal_vec):
color_dict_m[m_gal]=color_vec[i%len(color_vec)]
color_vec_lam=['m','b', 'c', 'g', 'y', 'r']
color_dict_lam={}
for i,lam in enumerate(wavelength_dict):
color_dict_lam[lam]=color_vec_lam[i%len(color_vec_lam)]
alpha=1.0
plot_positions_six={'Z087':321,'Y106':322, 'J129':323, 'W149':324, 'H158':325, 'F184':326}
## Theory for Delta R / R
#def theory_size_gauss (sigma, beta, flux_vec):
# sigma/=(pixel_scale) ### Convert to pixels?
# return ( (8*math.pi - beta*flux_vec/(sigma**2) ) / (8*math.pi - 2*beta*flux_vec/(sigma**2) ) ) - 1
#flux_vec=flux_dict['H158']*2.512**( m_zero - np.array(mag_gal_vec) )
#ratio_vec= theory_size_gauss (gal_sigma, beta_vec, flux_vec )
def add_subplot_axes(ax,rect,axisbg='w'):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
transFigure = fig.transFigure.inverted()
infig_position = transFigure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3] # <= Typo was here
subax = fig.add_axes([x,y,width,height],axisbg=axisbg)
x_labelsize = subax.get_xticklabels()[0].get_size()
y_labelsize = subax.get_yticklabels()[0].get_size()
x_labelsize *= rect[2]**0.5
y_labelsize *= rect[3]**0.5
subax.xaxis.set_tick_params(labelsize=x_labelsize)
subax.yaxis.set_tick_params(labelsize=y_labelsize)
return subax
def plot_function_e_and_r (fig, x1_vec, y1_vec, x2_vec, y2_vec, xlabel1='', xlabel2='', ylabel1=r"$\Delta$e", ylabel2=r"$\Delta$R/R", lam_key='', e_key=(0.0, 0.0), m_key='', label_bool=False):
color_fmt=color_dict_lam[lam_key]
#plot_pos=plot_positions_six[lam_key]
#label='e=(%g, %g)' %(e_key[0], e_key[1])
label=lam_key
#print "x1_vec, y1_vec, x2_vec, y2_vec", x1_vec, y1_vec, x2_vec, y2_vec
#fig = plt.figure()
ax = fig.add_subplot (211)
ax.errorbar( x1_vec, y1_vec, yerr=None, ecolor = color_fmt, label=label, fmt=color_fmt+'s-', markersize=marker_size, alpha=alpha)
#ax.errorbar( x_vec, y2_vec, yerr=None, ecolor = color_fmt, label='e2=%g'%e_key[1], fmt=color_fmt+'x-', markersize=marker_size, alpha=alpha)
plt.axhline(y=0.,color='k',ls='solid')
#plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(xlabel1, visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(ylabel1, visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
xmin, xmax=plt.xlim()
delta=(xmax-xmin)
plt.xlim ([xmin - 0.02*delta, xmax + 0.02*delta])
#plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam], fontsize=11)
#if plot_pos== 321:
if label_bool:
ax.legend(loc=loc_label , fancybox=True, ncol=2, numpoints=1, prop = prop)
#plt.grid(grid, which='both', ls='-', alpha=0.5)
plt.grid(grid)
ax = fig.add_subplot (212)
ax.errorbar( x2_vec, y2_vec, yerr=None, ecolor = color_fmt, label=label, fmt=color_fmt+'o-', markersize=marker_size, alpha=alpha)
#ax.errorbar( x_vec, theory_delta_r_gauss, yerr=None, ecolor = 'k', label='theory Gauss', fmt='r-', markersize=marker_size, alpha=1.)
plt.axhline(y=0.,color='k',ls='solid')
#if label_bool:
#plt.axhline(y=1e-4, color='r',ls='-', label='1e-4') # requirement
#ax.errorbar(x_vec, ratio_vec, yerr=None, ecolor = 'b', label='Theory', fmt='bo-', markersize=marker_size, alpha=alpha)
#plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(xlabel2, visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(ylabel2, visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('log')
plt.ylim ([10, 2e5])
xmin, xmax=plt.xlim()
delta=(xmax-xmin)
plt.xlim ([xmin - 0.02*delta, xmax + 0.02*delta])
#if type=='o':
#plt.ylim ([0., 0.026])
#plt.ylim([0., 0.18e-4])
#plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam], fontsize=11)
#if plot_pos== 324:
if label_bool:
ax.legend(loc=loc_label , fancybox=True, ncol=2, numpoints=1, prop = prop)
#Inset with zoom
#subpos = [0.35, 0.30, 0.475, 0.35]
#subax1 = add_subplot_axes(ax,subpos)
#subax1.plot (x_vec, y3_vec, color_fmt+'o-', markersize=marker_size, alpha=alpha)
#subax1.plot (x_vec, ratio_vec,'bo-', markersize=marker_size, alpha=alpha)
#subax1.axhline(y=1e-4, color='r',ls='--')
#plt.ylim([-1e-4, 3e-4])
#if type == 'o':
# plt.xlim ([22, 24.5])
#else:
# plt.xlim ([21.8, 24.2])
# subax1.set_yticklabels(subax1.get_yticks(), size=5, visible=True)
# subax1.set_xticklabels(subax1.get_xticks(), size=5, visible=True)
def plot_function_e (fig, x_vec, y1_vec, y2_vec, string='', xlabel='', y1label=r"$\Delta$e", label_string='', lam_key='', e_key=(0.0,0.0), m_key=''):
    color_fmt=color_dict_m[m_key]
plot_pos=plot_positions_six[lam_key]
label='e1=%g, m=%g'%(e_key,m_key)
#label='e1=%g'%e_key
label2='e2=%g, m=%g'%(e_key,m_key)
#label2='e2=%g'%e_key
ax = fig.add_subplot (plot_pos)
ax.errorbar( x_vec, y1_vec, yerr=None, ecolor = color_fmt, label=label, fmt=color_fmt+'s-', markersize=marker_size, alpha=alpha)
ax.errorbar( x_vec, y2_vec, yerr=None, ecolor = color_fmt, label=label2, fmt=color_fmt+'x-', markersize=marker_size, alpha=alpha)
plt.axhline(y=0.,color='k',ls='solid')
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(xlabel, visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(y1label, visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
xmin, xmax=plt.xlim()
delta=(xmax-xmin)
plt.xlim ([xmin-0.03*delta, xmax + 0.03*delta])
    plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam_key], fontsize=10)
if plot_pos== 321:
ax.legend(loc=loc_label , fancybox=True, ncol=2, numpoints=1, prop = prop)
#plt.grid(grid, which='both', ls='-', alpha=0.5)
plt.grid(grid)
def plot_function_r (fig, x_vec, y3_vec, xlabel='', y2label=r"$\Delta$R/R", lam_key='',m_key='', e_key=0.0, type='o'):
    color_fmt=color_dict_m[m_key]
plot_pos=plot_positions_six[lam_key]
ax = fig.add_subplot (plot_pos)
label='m=%g'%(m_key)
#label='e1=e2=%g'%(e_key)
ax.errorbar( x_vec, y3_vec, yerr=None, ecolor = color_fmt, label=label, fmt=color_fmt+'o-', markersize=marker_size, alpha=alpha)
plt.axhline(y=0.,color='k',ls='solid')
plt.axhline(y=1e-4, color='r',ls='--') # requirement
plt.axvline(x=beta0, color='b',ls='--') # nominal beta
#plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3)
ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x)
lx=ax.set_xlabel(xlabel, visible=visible_x)
#lx.set_fontsize(font_size)
ax.set_xscale('linear')
ax.set_yticklabels(ax.get_yticks(), visible= visible_y)
ly=ax.set_ylabel(y2label, visible=visible_y)
#ly.set_fontsize(font_size)
ax.set_yscale('linear')
#plt.ylim ([ymin, ymax])
xmin, xmax=plt.xlim()
delta=(xmax-xmin)
plt.xlim ([xmin-0.03*delta, xmax + 0.06*delta])
if type=='o':
plt.ylim ([0., 0.009])
    plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam_key], fontsize=10)
if plot_pos== 324:
ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop)
#plt.grid(grid, which='both', ls='-', alpha=0.5)
#Inset with zoom
subpos = [1-0.275, 0.15, 0.29, 0.375]
if type == 'o':
subpos = [1-0.275, 0.25, 0.29, 0.375]
subax1 = add_subplot_axes(ax,subpos)
subax1.plot ( x_vec, y3_vec, color_fmt+'o-', label=label, markersize=marker_size, alpha=alpha)
subax1.axhline(y=1e-4, color='r',ls='--')
plt.ylim([0., 3e-4])
if type == 'o':
plt.xlim ([7e-9, 3e-8])
else:
plt.xlim ([1e-8, 6e-8])
subax1.set_yticklabels(subax1.get_yticks(), size=3.9, visible=True)
subax1.set_xticklabels(subax1.get_xticks(), size=3.9, visible=True)
if plot_pos in [322,324,326]:
subax1.yaxis.set_label_position("right")
subax1.yaxis.tick_right()
subax1.set_yticklabels(subax1.get_yticks(), size=4, visible=True)
subax1.set_xticklabels(subax1.get_xticks(), size=3.9, visible=True)
#pp.savefig()
plot_positions_six={'Z087':321,'Y106':322, 'J129':323, 'W149':324, 'H158':325, 'F184':326}
if type == 'bf':
string_g= "BF: BaseCDModel" + "\n" + "Gaussian ($\sigma$=%g'')* Pixel (0.11 arcsec/pix), no noise." %(gal_sigma)
string_o= "BF: BaseCDModel" + "\n" + "Optical (tel_diam=%g m, obscuration=%g) * Pixel (0.11 arcsec/pix), no noise. "%(tel_diam, obscuration_optical)
elif type == 'nl':
string_g= r"Non-linearity: $f=x-\beta x^{2}$ " + "\n" + "Gaussian ($\sigma$=%g'') * Pixel (0.11 arcsec/pix), no noise." %(gal_sigma)
string_o= r"Non-linearity: $f=x-\beta x^{2}$ " + "\n" + "Optical (tel_diam=%g m, obscuration=%g) * Pixel (0.11 arcsec/pix), no noise. "%(tel_diam, obscuration_optical)
else:
print "invalid type (nor 'bf' nor 'nl')"
sys.exit(1)
# + r"($\beta$=%g)" %(beta0)
def get_slope (x, y):
fitfunc = lambda p, x: p[0]*x
errfunc = lambda p, x, y: fitfunc(p, x) - y
p0 = [1.]
p1, success = optimize.leastsq(errfunc, p0[:], args=(x,y))
    print 'slope:', p1[0]
return p1[0]
dic_list=[optical_no_noise]
#e_vec_temp=[]
#for var in e_vec:
# e_vec_temp.append(var[0])
for dictionary in dic_list:
beta_vec=np.array(beta_vec)
slope_dict={}
for lam in wavelength_dict:
slope_dict[lam] =[[],[]] #slope_e1, slope_r
## Gaussian no noise, Delta_e, one filter
fig = plt.figure()
for lam in wavelength_dict:
for m_gal in m_gal_vec:
slope_e1= get_slope ( beta_vec, dictionary[lam][m_gal][0]) #delta_e1
slope_dict[lam][0].append(slope_e1)
slope_r= get_slope (beta_vec, dictionary[lam][m_gal][2]) #delta_r
slope_dict[lam][1].append(slope_r)
for lam in wavelength_dict:
print "lam", lam
plot_function_e_and_r (fig, m_gal_vec, slope_dict[lam][0] , m_gal_vec, slope_dict[lam][1], xlabel1='$m$', xlabel2='$m$', ylabel1=r"$\Delta e_1$/$\beta$", ylabel2=r"$\Delta R/R/\beta$", lam_key=lam, m_key=m_gal, label_bool=True)
plt.suptitle(string_o, fontsize=13)
fig.tight_layout()
plt.subplots_adjust(top=0.85)
pp.savefig(fig)
plt.close()
"""
## Gaussian no noise, Delta_e, all filters
fig = plt.figure()
for lam in wavelength_dict:
for e in e_vec: # One single value
for m_gal in m_gal_vec:
plot_function_e (fig, beta_vec , gauss_no_noise[lam][m_gal][0],gauss_no_noise[lam][m_gal][1], xlabel=r"$\beta$", lam_key=lam, e_key=e, m_key=m_gal)
string="Gaussian($\sigma$=%g'')*Pixel, no noise. " %(gal_sigma) +r"$f=x-\beta x^{2}$"+"\n Object flux: gal_flux=6e4*2.521**(%g-%g)" %( m_zero, m_gal)
plt.suptitle(string, fontsize=11)
fig.tight_layout()
plt.subplots_adjust(top=0.85)
pp.savefig(fig)
## Gaussian no noise, Delta_R/R, all filters
fig = plt.figure()
for lam in wavelength_dict:
for e in e_vec: # One single value
for m_gal in m_gal_vec:
plot_function_r (fig, beta_vec , gauss_no_noise[lam][m_gal][2], xlabel=r"$\beta$", lam_key=lam, e_key=e, m_key=m_gal)
string="Gaussian($\sigma$=%g'')*Pixel, no noise. " %(gal_sigma) +r"$f=x-\beta x^{2}$"+"\n Object flux: gal_flux=6e4*2.521**(%g-%g)" %( m_zero, m_gal)
plt.suptitle(string, fontsize=11)
fig.tight_layout()
plt.subplots_adjust(top=0.85)
pp.savefig(fig)
## Optical no noise, Delta_e, all filters
fig = plt.figure()
for lam in wavelength_dict:
for e in e_vec: # One single value
for m_gal in m_gal_vec:
plot_function_e (fig, beta_vec, optical_no_noise[lam][m_gal][0], optical_no_noise[lam][m_gal][1], xlabel=r"$\beta$", lam_key=lam, e_key=e, m_key=m_gal)
string="Optical(tel_diam=%g m)*Pixel, no noise. "%(tel_diam) + r"$f=x-\beta x^{2}$," +"\n Object flux: gal_flux=6e4*2.521**(%g-%g)" %(m_zero, m_gal)
plt.suptitle(string, fontsize=11)
fig.tight_layout()
plt.subplots_adjust(top=0.85)
pp.savefig(fig)
## Optical no noise, Delta_R/R, all filters
fig = plt.figure()
for lam in wavelength_dict:
for e in e_vec: # One single value
for m_gal in m_gal_vec:
plot_function_r (fig, beta_vec , optical_no_noise[lam][m_gal][2], xlabel=r"$\beta$", lam_key=lam, e_key=e, m_key=m_gal, type='o')
string="Optical(tel_diam=%g m)*Pixel, no noise. "%(tel_diam) + r"$f=x-\beta x^{2}$," + "\n Object flux: gal_flux=6e4*2.521**(%g-%g)" %(m_zero, m_gal)
plt.suptitle(string, fontsize=11)
fig.tight_layout()
plt.subplots_adjust(top=0.85)
pp.savefig(fig)
"""
"""
fig=plt.figure()
for e in e_vec:
for m_gal in m_gal_vec:
plot_function (fig,beta_vec, gauss_noise[m_gal][0],gauss_noise[m_gal][1],gauss_noise[m_gal][2], string="Gaussian*Pixel, noise. " +r"$f=x-\beta x^{2}$", xlabel=r"$\beta$", e_key=e, m_key=m_gal)
pp.savefig(fig)
fig=plt.figure()
for e in e_vec:
for m_gal in m_gal_vec:
plot_function (fig, beta_vec, optical_no_noise[m_gal][0], optical_no_noise[m_gal][1], optical_no_noise[m_gal][2], string="Optical($\lambda$=%g nm, tel_diam=%g m)*Pixel, no noise. "%(lam,tel_diam) +r"$f=x-\beta x^{2}$" , xlabel=r"$\beta$", e_key=e, m_key=m_gal)
pp.savefig(fig)
fig=plt.figure()
for e in e_vec:
for m_gal in m_gal_vec:
plot_function (fig, beta_vec, optical_noise[m_gal][0],optical_noise[m_gal][1],optical_noise[m_gal][2], string="Optical*Pixel, noise. " +r"$f=x-\beta x^{2}$" , xlabel=r"$\beta$", e_key=e, m_key=m_gal)
pp.savefig(fig)
"""
pp.close()
| mit |
aewhatley/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
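# --- Illustrative usage (added example; not part of the original module) ---
# A minimal, hedged sketch of the round trip suggested in the Notes section:
# fit one instance, then reuse it for transform/inverse_transform so the
# component signs stay consistent. The data and parameters are assumptions
# chosen only for illustration.
def _truncated_svd_demo():
    rng = check_random_state(0)
    X = sp.rand(100, 30, density=0.2, format='csr', random_state=rng)
    svd = TruncatedSVD(n_components=5, random_state=42)
    X_reduced = svd.fit_transform(X)           # dense (100, 5) array
    X_back = svd.inverse_transform(X_reduced)  # dense (100, 30) approximation of X
    return X_reduced, X_back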
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/io/tests/test_pickle.py | 9 | 7588 | # pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
from datetime import datetime, timedelta
import operator
import pickle as pkl
import nose
import os
from distutils.version import LooseVersion
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from pandas import Index
from pandas.sparse.tests import test_sparse
from pandas import compat
from pandas.compat import u
from pandas.util.misc import is_little_endian
import pandas
from pandas.tseries.offsets import Day, MonthEnd
class TestPickle():
"""
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
NOTE: TestPickle can't be a subclass of tm.Testcase to use test generator.
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import create_pickle_data
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, result, expected, typ, version=None):
if isinstance(expected,Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith('sp_'):
comparator = getattr(test_sparse,"assert_%s_equal" % typ)
comparator(result,expected,exact_indices=False)
else:
comparator = getattr(tm,"assert_%s_equal" % typ,tm.assert_almost_equal)
comparator(result,expected)
def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
continue
# use a specific comparator
# if available
comparator = getattr(self,"compare_{typ}_{dt}".format(typ=typ,dt=dt), self.compare_element)
comparator(result, expected, typ, version)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def read_pickles(self, version):
if not is_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
data = self.compare(vf, version)
if data is None:
continue
if 'series' in data:
if 'ts' in data['series']:
self._validate_timeseries(data['series']['ts'], self.data['series']['ts'])
self._validate_frequency(data['series']['ts'])
if 'index' in data:
if 'period' in data['index']:
self._validate_periodindex(data['index']['period'],
self.data['index']['period'])
n += 1
assert n > 0, 'Pickle files are not tested'
def test_pickles(self):
pickle_path = tm.get_data_path('legacy_pickle')
n = 0
for v in os.listdir(pickle_path):
pth = os.path.join(pickle_path, v)
if os.path.isdir(pth):
yield self.read_pickles, v
n += 1
assert n > 0, 'Pickle files are not tested'
def test_round_trip_current(self):
try:
import cPickle as c_pickle
def c_pickler(obj,path):
with open(path,'wb') as fh:
c_pickle.dump(obj,fh,protocol=-1)
def c_unpickler(path):
with open(path,'rb') as fh:
fh.seek(0)
return c_pickle.load(fh)
except:
c_pickler = None
c_unpickler = None
import pickle as python_pickle
def python_pickler(obj,path):
with open(path,'wb') as fh:
python_pickle.dump(obj,fh,protocol=-1)
def python_unpickler(path):
with open(path,'rb') as fh:
fh.seek(0)
return python_pickle.load(fh)
for typ, dv in self.data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, c_pickler, python_pickler ]:
if writer is None:
continue
with tm.ensure_clean(self.path) as path:
# test writing with each pickler
writer(expected,path)
# test reading with each unpickler
result = pd.read_pickle(path)
self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
self.compare_element(result, expected, typ)
result = python_unpickler(path)
self.compare_element(result, expected, typ)
def _validate_timeseries(self, pickled, current):
# GH 7748
tm.assert_series_equal(pickled, current)
tm.assert_equal(pickled.index.freq, current.index.freq)
tm.assert_equal(pickled.index.freq.normalize, False)
tm.assert_numpy_array_equal(pickled > 0, current > 0)
def _validate_frequency(self, pickled):
# GH 9291
freq = pickled.index.freq
result = freq + Day(1)
tm.assert_equal(result, Day(2))
result = freq + pandas.Timedelta(hours=1)
tm.assert_equal(isinstance(result, pandas.Timedelta), True)
tm.assert_equal(result, pandas.Timedelta(days=1, hours=1))
result = freq + pandas.Timedelta(nanoseconds=1)
tm.assert_equal(isinstance(result, pandas.Timedelta), True)
tm.assert_equal(result, pandas.Timedelta(days=1, nanoseconds=1))
def _validate_periodindex(self, pickled, current):
tm.assert_index_equal(pickled, current)
tm.assertIsInstance(pickled.freq, MonthEnd)
tm.assert_equal(pickled.freq, MonthEnd())
tm.assert_equal(pickled.freqstr, 'M')
tm.assert_index_equal(pickled.shift(2), current.shift(2))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-3.0 |
aerijman/Transcriptional-Activation-Domains | empirical_null_model/libraries/predictions_library.py | 2 | 5993 | import pandas as pd
import numpy as np
import re,os
import matplotlib.pyplot as plt
##################################### MISC. FUNCTIONS ##################################
# list of one-letter aa
aa = ['R','H','K','D','E','S','T','N','Q','A','V','L','I','M','F' ,'Y', 'W', 'C','G','P']
'''Split data into Train, Test and Validation sets with ratio 8:1:1'''
def split_data(idx=None, data=None, selection=aa, column_label_score_NAME='scores', seed=42):
train, test = (np.array([0.8, 0.1]) * data.shape[0]).astype(int)
X, y = data[selection].values, data[column_label_score_NAME].values
X, y = X[idx], y[idx]
Xtrain, ytrain = X[:train], y[:train]
Xtest, ytest = X[train:-test], y[train:-test]
Xvalid, yvalid = X[-test:], y[-test:]
return(Xtrain, ytrain, Xtest, ytest, Xvalid, yvalid)
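## --- Illustrative usage (added example; not part of the original module) ---
## Hedged sketch of the 8:1:1 split on a synthetic DataFrame whose columns
## match the one-letter amino-acid list above plus a 'scores' column; the
## random values are assumptions made only for illustration.
def _split_data_demo(n_rows=100, seed=42):
    rng = np.random.RandomState(seed)
    data = pd.DataFrame(rng.rand(n_rows, len(aa)), columns=aa)
    data['scores'] = rng.rand(n_rows)
    idx = rng.permutation(n_rows)              # shuffled row order
    Xtr, ytr, Xte, yte, Xva, yva = split_data(idx=idx, data=data)
    print(Xtr.shape, Xte.shape, Xva.shape)     # roughly 80/10/10 of the rows
    return Xtr, ytr, Xte, yte, Xva, yva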
'''given X and y it outputs an array of thresholds, False positive rate and True positive rate'''
def roc_curve(X,y):
# make sure X and y are numpy arrays
X,y = np.array(X), np.array(y)
# thresholds to use for defining fpr and tpr
scale = np.linspace(X.min(), X.max(), 50)
# define fpr and tpr functions to use with different thresholds
def tpr(y_hat, y):
a,b = set(np.hstack(np.where(y==1))), set(np.hstack(np.where(y_hat==1)))
return(len(a & b) / len(a))
def fpr(y_hat, y):
a,b = set(np.hstack(np.where(y==0))), set(np.hstack(np.where(y_hat==1)))
return(len(a & b) / len(a))
# Core of the function where TPR and FPR are calculated
TPR, FPR = np.zeros(50), np.zeros(50)
for n,s in enumerate(scale):
# threshold of variable defines what is + and -
y_hat = np.array([1 if x>=s else 0 for x in X])
TPR[n] = tpr(y_hat, y)
FPR[n] = fpr(y_hat, y)
# the resulting array will contain thresholds, fpr and tpr
roc = np.vstack([scale, FPR, TPR])
return(roc)
# I have this function to compare upon any doubt but I will be using the sklearn implementation
from sklearn.metrics import roc_auc_score
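## --- Illustrative usage (added example; not part of the original module) ---
## Hedged sketch cross-checking the hand-rolled roc_curve above against
## sklearn's roc_auc_score on synthetic scores; the data and the demo function
## name are assumptions made only for illustration.
def _roc_curve_demo(n=500, seed=0):
    rng = np.random.RandomState(seed)
    y = rng.randint(0, 2, size=n)                  # binary labels
    X = y + rng.normal(scale=0.8, size=n)          # noisy scores correlated with y
    roc = roc_curve(X, y)                          # rows: thresholds, FPR, TPR
    # thresholds increase along the array, so FPR/TPR are non-increasing;
    # integrate TPR over FPR and take abs() to get an approximate AUC
    auc_manual = abs(np.trapz(roc[2], roc[1]))
    auc_sklearn = roc_auc_score(y, X)
    print('manual AUC ~ %.3f, sklearn AUC ~ %.3f' % (auc_manual, auc_sklearn))
    return auc_manual, auc_sklearn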
''' Open the new lukasz's files with sequences as index and two columns, scores and normalized scores'''
def open_fastas(ff):
vals, keys = [], []
fasta = open(ff)
while True:
try:
header, sequence = next(fasta).strip(), next(fasta).strip()
_, bg, b1, b2, b3, b4, score, norm_score = header.split("_")
vals.append([score, norm_score])
keys.append(sequence)
except StopIteration: break
df = pd.DataFrame(vals, index=keys, columns=['scores','norm_scores'])
df = df.astype(float)
return df
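## --- Illustrative usage (added example; not part of the original module) ---
## Hedged sketch of the FASTA header layout open_fastas expects
## (8 underscore-separated fields ending in score and normalized score);
## the sequences and scores below are made-up values for illustration.
def _open_fastas_demo():
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.fasta')
    with os.fdopen(fd, 'w') as fh:
        fh.write('>id_1_2_3_4_5_0.75_0.50\nMKTAYIAKQR\n')
        fh.write('>id_1_2_3_4_5_0.10_0.05\nGSHMSSQQQL\n')
    df = open_fastas(path)    # index: sequences, columns: scores / norm_scores
    os.remove(path)
    return df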
'''one hot encooding of data'''
def one_hot_encode(seq):
X = np.zeros(shape=(len(seq),len(aa)))
try:
for n,i in enumerate(seq): X[n, aa.index(i)] = 1
return X
    except Exception as e:
print('exception {} encountered'.format(e))
#################################### NEWER FUNCTIONS ########################################
''' Load psipred and iupred predictions '''
def load_predicted_properties(fileName):
k, v = [],[]
with open(fileName,"r") as f:
while 1:
try:
_1, sequence, prediction, _2 = next(f), next(f), next(f), next(f)
k.append(sequence.strip())
v.append([i for i in prediction.strip()])
except StopIteration:
                print(fileName + ' loaded successfully!')
break
df = pd.DataFrame(v, index=k)
return(df)
# load netsurf data
def load_netsurf(fileName):
k,v1,v2 = [],[],[]
with open(fileName, "r") as f:
while 1:
try:
_1, seq, acc, _2, ss, _3 = next(f), next(f), next(f), next(f), next(f), next(f)
k.append(seq.strip())
v1.append([i for i in acc.strip()])
v2.append([i for i in ss.strip()])
except StopIteration:
                print(fileName + ' loaded successfully!')
break
acc = pd.DataFrame(v1, index=k)
ss =pd.DataFrame(v2, index=k)
return ss, acc
# one hot encode the data
def ohe(seq, property_dict):
tmp = np.zeros(shape=(len(seq), len(property_dict)))
for n,i in enumerate(seq):
tmp[n, property_dict.index(i)] = 1
return tmp
################################### PREDICTOR FUNCTIONS #######################################
def predict(model, sequence):
# apend the 1st 15 residues to the "N" terminal and the last 15 residues to the "C" terminal
sequence = sequence[:15] + sequence + sequence[-15:]
# declare array of probabilities of being TAD per aminoacid
probas = []
# loop sliding over each position and taking the 30 aa (position is the 15t)
for i in range(len(sequence)-30):
test_window = sequence[i:i+30]
ohe_test_window = ohe(test_window, aa).reshape(1,600)
probas.append( model.predict_proba(ohe_test_window)[0][0] )
return np.array(probas)
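## --- Illustrative usage (added example; not part of the original module) ---
## Hedged sketch of the sliding-window predictor above: any object exposing
## predict_proba on a (1, 600) one-hot window will do, so a scikit-learn
## LogisticRegression fit on random data is used here purely as a stand-in
## (an assumption for illustration, not the model used in this project).
def _predict_demo(seed=0):
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(seed)
    X_fake = rng.randint(0, 2, size=(50, 600))     # fake one-hot 30-mer windows
    y_fake = rng.randint(0, 2, size=50)
    model = LogisticRegression().fit(X_fake, y_fake)
    seq = ''.join(rng.choice(aa, size=60))         # random 60-residue sequence
    probas = predict(model, seq)
    print(len(seq), len(probas))                   # one probability per residue
    return probas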
def print_prediction(probabilities, sequence, wide=120):
prediction = ''.join(['*' if i>0.8 else ' ' for i in probabilities])
while len(sequence)>0:
print(prediction[:wide]+'\n'+sequence[:wide])
prediction = prediction[wide:]
sequence = sequence[wide:]
def plot_prediction(probabilities):
filtered = np.array([i if i >0.8 else 0 for i in probabilities])
filtered = np.convolve(filtered, np.ones(15)/15, "same")
#plt.subplot(121);
plt.plot(filtered); plt.title('filtered'); plt.ylim(0.1)
plt.xlabel('position'); plt.ylabel('TAD probability');
#plt.subplot(122); plt.plot(probabilities); plt.title('original'); plt.ylim(0.1)
def protein_from_sgd(name):
sequence = []
found = False
for i in open('../from_scratch/orf_trans.fasta'):
if i[0]=='>':
if found and i[0]=='>': break
if re.search(name,i[:20]) and not found: found = True
if found and i[0] != '>': sequence.append(i.strip().strip('*'))
    return ''.join(sequence)
| mit |
musically-ut/statsmodels | statsmodels/iolib/summary2.py | 21 | 19583 | from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
import datetime
import textwrap
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format: string
Formatting to float data columns
align : string
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format: string
Formatting to array if type is float
align : string
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : string
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
        the note will be wrapped to table width. Notes are not indented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, string_types):
self.title = title
else:
try:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
except:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
float_formatting: string
Float formatting for summary of parameters (optional)
title : string
Title of the summary table (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
#TODO: this isn't used anywhere?
rule_dash = widest * '-'
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
if title is not None:
title = title
if len(title) < widest:
title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
#TODO: this isn't used anywhere
title = self.title
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '} \\\\'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
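# --- Illustrative usage (added example; not part of the original module) ---
# Hedged sketch of assembling a Summary by hand with add_dict/add_df/add_title
# as described above; the values are made up purely to exercise the API.
def _summary_build_demo():
    smry = Summary()
    smry.add_dict(OrderedDict([('Model:', 'demo'), ('No. Observations:', '50')]))
    smry.add_df(pd.DataFrame({'Coef.': [1.0, -0.5], 'Std.Err.': [0.1, 0.2]},
                             index=['x1', 'x2']))
    smry.add_title('Results: demo model')
    smry.add_text('Illustrative values only.')
    return smry.as_text()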
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = tables[i].shape[1] - 1
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
info['R-squared:'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared:'] = lambda x: "%#8.3f" % x.rsquared_adj
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
# NOTE: some models don't have loglike defined (RLM), so that's NIE
except (AttributeError, KeyError, NotImplementedError):
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
'''Stack coefficients and standard errors in single column
'''
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.ix[:, 1] = '(' + res.ix[:, 1] + ')'
# Significance stars
if stars:
idx = res.ix[:, 3] < .1
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .05
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .01
res.ix[idx, 0] = res.ix[idx, 0] + '*'
# Stack Coefs and Std.Errors
res = res.ix[:, :2]
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=[], stars=False,
info_dict=None, regressor_order=[]):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : string
float format for coefficients and standard errors
Default : '%.4f'
model_names : list of strings of length len(results) if the names are not
unique, a roman number will be appended to all model names
stars : bool
print significance stars
info_dict : dict
dict of lambda functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list of strings
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(np.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
# add infos about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
for df , name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry.add_df(summ, header=True, align='l')
smry.add_text('Standard errors in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, ***p<.01')
return smry
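# Minimal usage sketch (res1 and res2 are hypothetical fitted results instances,
# not defined in this module; the info_dict lambdas must return strings):
#   info = {'N': lambda x: '%d' % int(x.nobs),
#           'R2': lambda x: '%.2f' % x.rsquared}
#   table = summary_col([res1, res2], stars=True, float_format='%.3f',
#                       model_names=['OLS 1', 'OLS 2'], info_dict=info)
#   print(table.as_text())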
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except Exception:
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.ix[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.ix[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
| bsd-3-clause |
cginternals/glkernel | benchmarking/visualize.py | 1 | 3064 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 12:28:50 2018
@author: florian
"""
#%%
import matplotlib.figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import itertools
import seaborn as sns
import pandas as pd
from numpy import median
#%%
def load_data(filepath):
raw = read_in(filepath)
return process_lines(raw)
#%%
def read_in(filename):
with open(filename) as f:
raw = []
for line in f.readlines():
raw.append(line.strip())
return raw
#%%
def process_lines(raw):
offset = 4
cpu_index = 3
name_index = 0
processed_elements = []
for line in raw[offset:]:
elements = line.strip().split(',')
name = elements[name_index].strip('"')
cpu_time = elements[cpu_index]
split_name = name.split('_')
if(len(split_name) == 3):
size = name.split('/')[1] + ('²')
label = name.split('/')[0].split('_')[1]
processed_elements.append((label,size,cpu_time))
return processed_elements
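# Illustrative input line (hypothetical values): the first four lines of the file are
# skipped (offset=4) and each remaining row is assumed to be a Google-Benchmark-style
# CSV record "name,iterations,real_time,cpu_time,...", e.g.
#   "BM_uniform_float/128",1000,152000,151000,ns
# which would yield label 'uniform', size '128²' and cpu_time '151000'.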
#%%
def load_processed(filepath):
processed = load_data(filepath)
times = []
sizes = []
time_per_element = []
functions = []
for k, g in itertools.groupby(processed, lambda el: (el[0],el[1])):
for el in g:
times.append(float(el[2]))
sizes.append(k[1])
time_per_element.append(float(el[2]) / float(k[1][:-1]) ** 2)
functions.append(k[0])
return times, sizes, functions, time_per_element
#%%
def draw_boxplots(fig, df):
ax = fig.axes[0]
ax = sns.barplot(x="kernel size", y="time_per_element", hue="omp", data=df, ax=ax)
ax.get_yaxis().set_label_text("Time (ns)")
# ax.set_ylim(0, 3000000)
# ax.set_yscale('log')
def create_fig(function):
fig = matplotlib.figure.Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
# ax.set_yscale('log')
ax.set_title('Time per element ({})'.format(function))
sns.despine(ax=ax)
return fig
def save_fig(fig, filename):
fig.savefig(filename)
#%%
def flatten(list_of_lists):
return [item for sublist in list_of_lists for item in sublist]
#%%
if __name__ == '__main__':
pass
no_omp_proc = load_processed("./no_omp_result.csv")
omp_proc = load_processed("./omp_result.csv")
sns.set(context="talk", style="white")
data = {"kernel size": no_omp_proc[1] + omp_proc[1],
"times": no_omp_proc[0] + omp_proc[0],
"functions": no_omp_proc[2] + omp_proc[2],
"time_per_element": no_omp_proc[3] + omp_proc[3],
"omp": ["No"]*len(no_omp_proc[0]) + ["Yes"] * len(omp_proc[0])}
df = pd.DataFrame(data=data)
#%%
for function, group in df.groupby(['functions']):
fig = create_fig(function)
draw_boxplots(fig, group)
save_fig(fig, "bench_{}.png".format(function))
| mit |
Saurabh7/shogun | examples/undocumented/python_modular/graphical/preprocessor_kpca_graphical.py | 26 | 1893 | from numpy import *
import matplotlib.pyplot as p
import os, sys, inspect
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from generate_circle_data import circle_data
cir=circle_data()
number_of_points_for_circle1=42
number_of_points_for_circle2=122
row_vector=2
data=cir.generate_data(number_of_points_for_circle1,number_of_points_for_circle2,row_vector)
d=zeros((row_vector,number_of_points_for_circle1))
d2=zeros((row_vector,number_of_points_for_circle2))
d=[data[i][0:number_of_points_for_circle1] for i in range(0,row_vector)]
d2=[data[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(0,row_vector)]
p.plot(d[1][:],d[0][:],'x',d2[1][:],d2[0][:],'o')
p.title('input data')
p.show()
parameter_list = [[data,0.01,1.0], [data,0.05,2.0]]
def preprocessor_kernelpca_modular (data, threshold, width):
from modshogun import RealFeatures
from modshogun import KernelPCA
from modshogun import GaussianKernel
features = RealFeatures(data)
kernel=GaussianKernel(features,features,width)
preprocessor=KernelPCA(kernel)
preprocessor.init(features)
preprocessor.set_target_dim(2)
#X=preprocessor.get_transformation_matrix()
X2=preprocessor.apply_to_feature_matrix(features)
lx0=len(X2)
modified_d1=zeros((lx0,number_of_points_for_circle1))
modified_d2=zeros((lx0,number_of_points_for_circle2))
modified_d1=[X2[i][0:number_of_points_for_circle1] for i in range(lx0)]
modified_d2=[X2[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(lx0)]
p.plot(modified_d1[0][:],modified_d1[1][:],'o',modified_d2[0][:],modified_d2[1][:],'x')
p.title('final data')
p.show()
return features
if __name__=='__main__':
print('KernelPCA')
preprocessor_kernelpca_modular(*parameter_list[0])
| mit |
mrichart/NNcoloring | src/train_NN_cv.py | 1 | 3993 | import pickle
import gc
import numpy as np
from sknn.mlp import Classifier, Convolution, Layer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
patch_sizes = [[3,3]] #[[3,3], [5,5], [11,11]] #patches to use
som_sizes = [[3,3],[10,10],[20,20]] #soms to use
training_sample_size = 50000
test_sample_size = 5000
def trainNN (X, y):
print (' NN X:' + str(X.shape) + ' y:' + str(y.shape))
print (' training_sample_size:' + str(training_sample_size) + ' test_sample_size:' + str(test_sample_size))
# nn classifier definition
#nn = Classifier(
# layers=[
# #Layer("Rectifier", units=30),
# #Layer("Rectifier", units=30),
# #Convolution("Rectifier", channels=16, kernel_shape=(5,5)),
# Layer("Rectifier", units=10),
# Layer("Rectifier", units=5),
# Layer("Softmax")],
# learning_rate=0.03, #0.02,
# n_iter=1000)
sss = StratifiedShuffleSplit(n_splits=5, test_size=test_sample_size, train_size=training_sample_size)
from sklearn.neural_network import MLPClassifier
nn = MLPClassifier(
hidden_layer_sizes=(500,500,),
#random_state=1,
max_iter=1000,
learning_rate_init = 0.001,
verbose = True,
) #warm_start=False)
#from sklearn.neural_network import MLPClassifier
#nn = MLPClassifier(solver='lbfgs', alpha=1e-5,
# hidden_layer_sizes=(15,), random_state=1)
# scalling + classification
pipeline = Pipeline([
('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
('neural network', nn)])
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y)
#print (' Xtr:' + str(X.shape) + ' ytr:' + str(y.shape))
X_train, y_train = X, np.ravel(y)
#pipeline.fit(X_train, y_train)
#for i in range(10000):
# pipeline.fit(X_train, y_train)
# if i%100 == 0:
# print(i)
scores = cross_val_score(pipeline, X_train, y_train, cv=sss)
#for train_index, test_index in sss.split(X_train, y_train):
# print("TRAIN:", train_index, "TEST:", test_index)
# _X_train, _X_test = X_train[train_index], X_train[test_index]
# _y_train, _y_test = y_train[train_index], y_train[test_index]
# pipeline.fit(_X_train, _y_train)
# print(pipeline.score(_X_test,_y_test ))
# return pipeline
print (scores)
return pipeline
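# Note: cross_val_score clones the pipeline internally, so the object returned above
# is still unfitted. A minimal sketch of fitting and saving it afterwards (the pickle
# filename is a hypothetical placeholder):
#   pipeline = trainNN(X, y)
#   pipeline.fit(X, np.ravel(y))
#   pickle.dump(pipeline, open('pkls/pipeline.pkl', 'wb'))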
for som_size in som_sizes:
print ('Using som: [' + str(som_size[0]) + 'x' + str (som_size[1]) + ']')
for patch_size in patch_sizes:
print (' With patch size: [' + str(patch_size[0]) + ',' + str (patch_size[1])+']')
fileData = open ('pkls/data_patch' + str(patch_size[0]) + 'x' + str (patch_size[1])+'_som' + str(som_size[0]) + 'x' + str (som_size[1])+'.pkl','rb')
X = pickle.load (fileData)
fileData.close()
fileLabels = open ('pkls/labels_patch' + str(patch_size[0]) + 'x' + str (patch_size[1])+'_som' + str(som_size[0]) + 'x' + str (som_size[1])+'.pkl','rb')
y = pickle.load (fileLabels)
fileLabels.close()
#reducedData = np.array (random.sample (data.tolist(), int(data.size*ratio)))
print (' Original dataset: X:', X.shape, ' y:', y.shape)
#np.random.shuffle(X)
#np.random.shuffle(y)
#X=X[0:training_sample_size,:]
#y=y[0:training_sample_size,:]
gc.collect()
pipeline = trainNN(X, y)
#pickle.dump(pipeline, open('pkls/pipeline_patch' + str(patch_size[0]) + 'x' + str (patch_size[1])+'_som' + str(som_size[0]) + 'x' + str (som_size[1])+'.pkl', 'wb'))
X = None
y = None
pipeline = None
gc.collect()
| mit |
plotly/python-api | packages/python/plotly/plotly/figure_factory/_bullet.py | 2 | 13154 | from __future__ import absolute_import
import collections
import math
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.figure_factory import utils
import plotly
import plotly.graph_objs as go
pd = optional_imports.get_module("pandas")
def _bullet(
df,
markers,
measures,
ranges,
subtitles,
titles,
orientation,
range_colors,
measure_colors,
horizontal_spacing,
vertical_spacing,
scatter_options,
layout_options,
):
num_of_lanes = len(df)
num_of_rows = num_of_lanes if orientation == "h" else 1
num_of_cols = 1 if orientation == "h" else num_of_lanes
if not horizontal_spacing:
horizontal_spacing = 1.0 / num_of_lanes
if not vertical_spacing:
vertical_spacing = 1.0 / num_of_lanes
fig = plotly.subplots.make_subplots(
num_of_rows,
num_of_cols,
print_grid=False,
horizontal_spacing=horizontal_spacing,
vertical_spacing=vertical_spacing,
)
# layout
fig["layout"].update(
dict(shapes=[]),
title="Bullet Chart",
height=600,
width=1000,
showlegend=False,
barmode="stack",
annotations=[],
margin=dict(l=120 if orientation == "h" else 80),
)
# update layout
fig["layout"].update(layout_options)
if orientation == "h":
width_axis = "yaxis"
length_axis = "xaxis"
else:
width_axis = "xaxis"
length_axis = "yaxis"
for key in fig["layout"]:
if "xaxis" in key or "yaxis" in key:
fig["layout"][key]["showgrid"] = False
fig["layout"][key]["zeroline"] = False
if length_axis in key:
fig["layout"][key]["tickwidth"] = 1
if width_axis in key:
fig["layout"][key]["showticklabels"] = False
fig["layout"][key]["range"] = [0, 1]
# narrow domain if 1 bar
if num_of_lanes <= 1:
fig["layout"][width_axis + "1"]["domain"] = [0.4, 0.6]
if not range_colors:
range_colors = ["rgb(200, 200, 200)", "rgb(245, 245, 245)"]
if not measure_colors:
measure_colors = ["rgb(31, 119, 180)", "rgb(176, 196, 221)"]
for row in range(num_of_lanes):
# ranges bars
for idx in range(len(df.iloc[row]["ranges"])):
inter_colors = clrs.n_colors(
range_colors[0], range_colors[1], len(df.iloc[row]["ranges"]), "rgb"
)
x = (
[sorted(df.iloc[row]["ranges"])[-1 - idx]]
if orientation == "h"
else [0]
)
y = (
[0]
if orientation == "h"
else [sorted(df.iloc[row]["ranges"])[-1 - idx]]
)
bar = go.Bar(
x=x,
y=y,
marker=dict(color=inter_colors[-1 - idx]),
name="ranges",
hoverinfo="x" if orientation == "h" else "y",
orientation=orientation,
width=2,
base=0,
xaxis="x{}".format(row + 1),
yaxis="y{}".format(row + 1),
)
fig.add_trace(bar)
# measures bars
for idx in range(len(df.iloc[row]["measures"])):
inter_colors = clrs.n_colors(
measure_colors[0],
measure_colors[1],
len(df.iloc[row]["measures"]),
"rgb",
)
x = (
[sorted(df.iloc[row]["measures"])[-1 - idx]]
if orientation == "h"
else [0.5]
)
y = (
[0.5]
if orientation == "h"
else [sorted(df.iloc[row]["measures"])[-1 - idx]]
)
bar = go.Bar(
x=x,
y=y,
marker=dict(color=inter_colors[-1 - idx]),
name="measures",
hoverinfo="x" if orientation == "h" else "y",
orientation=orientation,
width=0.4,
base=0,
xaxis="x{}".format(row + 1),
yaxis="y{}".format(row + 1),
)
fig.add_trace(bar)
# markers
x = df.iloc[row]["markers"] if orientation == "h" else [0.5]
y = [0.5] if orientation == "h" else df.iloc[row]["markers"]
markers = go.Scatter(
x=x,
y=y,
name="markers",
hoverinfo="x" if orientation == "h" else "y",
xaxis="x{}".format(row + 1),
yaxis="y{}".format(row + 1),
**scatter_options
)
fig.add_trace(markers)
# titles and subtitles
title = df.iloc[row]["titles"]
if "subtitles" in df:
subtitle = "<br>{}".format(df.iloc[row]["subtitles"])
else:
subtitle = ""
label = "<b>{}</b>".format(title) + subtitle
annot = utils.annotation_dict_for_label(
label,
(num_of_lanes - row if orientation == "h" else row + 1),
num_of_lanes,
vertical_spacing if orientation == "h" else horizontal_spacing,
"row" if orientation == "h" else "col",
True if orientation == "h" else False,
False,
)
fig["layout"]["annotations"] += (annot,)
return fig
def create_bullet(
data,
markers=None,
measures=None,
ranges=None,
subtitles=None,
titles=None,
orientation="h",
range_colors=("rgb(200, 200, 200)", "rgb(245, 245, 245)"),
measure_colors=("rgb(31, 119, 180)", "rgb(176, 196, 221)"),
horizontal_spacing=None,
vertical_spacing=None,
scatter_options={},
**layout_options
):
"""
**deprecated**, use instead the plotly.graph_objects trace
:class:`plotly.graph_objects.Indicator`.
:param (pd.DataFrame | list | tuple) data: either a list/tuple of
dictionaries or a pandas DataFrame.
:param (str) markers: the column name or dictionary key for the markers in
each subplot.
:param (str) measures: the column name or dictionary key for the measure
bars in each subplot. This bar usually represents the quantitative
measure of performance, usually a list of two values [a, b] and are
the blue bars in the foreground of each subplot by default.
:param (str) ranges: the column name or dictionary key for the qualitative
ranges of performance, usually a 3-item list [bad, okay, good]. They
correspond to the grey bars in the background of each chart.
:param (str) subtitles: the column name or dictionary key for the subtitle
of each subplot chart. The subtitles are displayed right underneath
each title.
:param (str) titles: the column name or dictionary key for the main label
of each subplot chart.
:param (bool) orientation: if 'h', the bars are placed horizontally as
rows. If 'v' the bars are placed vertically in the chart.
:param (list) range_colors: a tuple of two colors between which all
the rectangles for the range are drawn. These rectangles are meant to
be qualitative indicators against which the marker and measure bars
are compared.
Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')
:param (list) measure_colors: a tuple of two colors which is used to color
the thin quantitative bars in the bullet chart.
Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')
:param (float) horizontal_spacing: see the 'horizontal_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (float) vertical_spacing: see the 'vertical_spacing' param in
plotly.tools.make_subplots. Ranges between 0 and 1.
:param (dict) scatter_options: describes attributes for the scatter trace
in each subplot such as name and marker size. Call
help(plotly.graph_objs.Scatter) for more information on valid params.
:param layout_options: describes attributes for the layout of the figure
such as title, height and width. Call help(plotly.graph_objs.Layout)
for more information on valid params.
Example 1: Use a Dictionary
>>> import plotly.figure_factory as ff
>>> data = [
... {"label": "revenue", "sublabel": "us$, in thousands",
... "range": [150, 225, 300], "performance": [220,270], "point": [250]},
... {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],
... "performance": [21, 23], "point": [26]},
... {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],
... "performance": [100,320],"point": [550]},
... {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],
... "performance": [1000, 1650],"point": [2100]},
... {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],
... "performance": [3.2, 4.7], "point": [4.4]}
... ]
>>> fig = ff.create_bullet(
... data, titles='label', subtitles='sublabel', markers='point',
... measures='performance', ranges='range', orientation='h',
... title='my simple bullet chart'
... )
>>> fig.show()
Example 2: Use a DataFrame with Custom Colors
>>> import plotly.figure_factory as ff
>>> import pandas as pd
>>> data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')
>>> fig = ff.create_bullet(
... data, titles='title', markers='markers', measures='measures',
... orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],
... scatter_options={'marker': {'symbol': 'circle'}}, width=700)
>>> fig.show()
"""
# validate df
if not pd:
raise ImportError("'pandas' must be installed for this figure factory.")
if utils.is_sequence(data):
if not all(isinstance(item, dict) for item in data):
raise exceptions.PlotlyError(
"Every entry of the data argument list, tuple, etc must "
"be a dictionary."
)
elif not isinstance(data, pd.DataFrame):
raise exceptions.PlotlyError(
"You must input a pandas DataFrame, or a list of dictionaries."
)
# make DataFrame from data with correct column headers
col_names = ["titles", "subtitle", "markers", "measures", "ranges"]
if utils.is_sequence(data):
df = pd.DataFrame(
[
[d[titles] for d in data] if titles else [""] * len(data),
[d[subtitles] for d in data] if subtitles else [""] * len(data),
[d[markers] for d in data] if markers else [[]] * len(data),
[d[measures] for d in data] if measures else [[]] * len(data),
[d[ranges] for d in data] if ranges else [[]] * len(data),
],
index=col_names,
)
elif isinstance(data, pd.DataFrame):
df = pd.DataFrame(
[
data[titles].tolist() if titles else [""] * len(data),
data[subtitles].tolist() if subtitles else [""] * len(data),
data[markers].tolist() if markers else [[]] * len(data),
data[measures].tolist() if measures else [[]] * len(data),
data[ranges].tolist() if ranges else [[]] * len(data),
],
index=col_names,
)
df = pd.DataFrame.transpose(df)
# make sure ranges, measures, 'markers' are not NAN or NONE
for needed_key in ["ranges", "measures", "markers"]:
for idx, r in enumerate(df[needed_key]):
try:
r_is_nan = math.isnan(r)
if r_is_nan or r is None:
df[needed_key][idx] = []
except TypeError:
pass
# validate custom colors
for colors_list in [range_colors, measure_colors]:
if colors_list:
if len(colors_list) != 2:
raise exceptions.PlotlyError(
"Both 'range_colors' or 'measure_colors' must be a list "
"of two valid colors."
)
clrs.validate_colors(colors_list)
colors_list = clrs.convert_colors_to_same_type(colors_list, "rgb")[0]
# default scatter options
default_scatter = {
"marker": {"size": 12, "symbol": "diamond-tall", "color": "rgb(0, 0, 0)"}
}
if scatter_options == {}:
scatter_options.update(default_scatter)
else:
# add default options to scatter_options if they are not present
for k in default_scatter["marker"]:
if k not in scatter_options["marker"]:
scatter_options["marker"][k] = default_scatter["marker"][k]
fig = _bullet(
df,
markers,
measures,
ranges,
subtitles,
titles,
orientation,
range_colors,
measure_colors,
horizontal_spacing,
vertical_spacing,
scatter_options,
layout_options,
)
return fig
| mit |
mahajrod/MACE | MACE/Visualization/TrackGroups.py | 1 | 3534 |
from MACE.Visualization.Styles.TrackGroup import TrackGroupStyle, default_track_group_style
import math
from collections import Iterable, OrderedDict
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
class TrackGroup(OrderedDict):
def __init__(self, tracks=None, y_start=None, x_start=0, x_end=1, style=default_track_group_style,
label=None, x_scale_factor=1, y_scale_factor=1, auto_scale=False,
subplot_x_y_ratio=None, figure_x_y_ratio=None):
if tracks:
OrderedDict.__init__(self, tracks)
else:
OrderedDict.__init__(self)
self.y_start = y_start
self.y_end = None
self.x_start = x_start
self.x_end = x_end
self.style = style
self.label = label
self.track_label_param_list = None
self.x_scale_factor = x_scale_factor
self.y_scale_factor = y_scale_factor
# TODO: finish autoscale implementation
self.auto_scale = auto_scale
self.x_y_ratio = None
self.subplot_x_y_ratio = subplot_x_y_ratio
self.figure_x_y_ratio = figure_x_y_ratio
def init_coordinates(self):
y = self.y_start + self.style.internal_offset - self.style.distance
self.track_label_param_list = [[len(self[track_name].label) if (self[track_name].label and self[track_name].style.show_label) else 0,
self[track_name].style.label_fontsize] for track_name in self]
for track_name in self:
self[track_name].y_start = y + self.style.distance
self.x_end = max(self.x_end, self[track_name].x_end)
y += self[track_name].style.height
self.x_end = self.x_end * self.style.x_multiplier
self.y_end = y + self.style.internal_offset
#if self.auto_scale:
# self.y_scale_factor = 1
self.x_y_ratio = self.x_end / self.y_end
for track_name in self:
self[track_name].track_group_x_y_ratio = self.x_y_ratio
def draw(self, axes=None, style=None, label_shift=0):
self.init_coordinates()
used_style = style if style else self.style
current_subplot = axes if axes else plt.gca()
for track_name in self:
self[track_name].draw(axes=axes)
#x_text_offset = max(list(map(lambda s: s[0]*s[1], self.track_label_param_list)))
# TODO: add automatic calculation for x axis
label_shift_full = (-120 if max(list(map(lambda s: s[0]*s[1], self.track_label_param_list))) else 0) + label_shift
if self.label and used_style.show_label:
#print(self.label)
#print(self.style.label_x_shift + label_shift_full, (self.y_end + self.y_start) / 2)
#print(self.y_start, self.y_end)
#print()
current_subplot.annotate(self.label, xy=(0, (self.y_start + self.y_end)/2 + self.style.label_y_shift + 2), xycoords='data',
fontsize=self.style.label_fontsize,
fontstyle=self.style.label_fontstyle,
fontweight=self.style.label_fontweight,
xytext=(self.style.label_x_shift + label_shift_full,
0), textcoords='offset points',
ha=self.style.label_hor_aln, va=self.style.label_vert_aln)
| apache-2.0 |
balborian/libmesh | doc/statistics/github_traffic_base.py | 1 | 6329 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import math
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num, num2date
import calendar
# Github has a "traffic" page now, but it doesn't seem like you can
# put in an arbitrary date range? When I looked at it, it showed the
# numbers of unique visitors and views for the last two weeks only...
# So as long as I check back at least once every two weeks I can keep
# a record of it? Great...
# https://github.com/libMesh/libmesh/graphs/traffic
# A generic class which can be used to customize the axis labels,
# titles, etc. of the three column plot data.
class PlotData(object):
def __init__(self):
self.left_axis_label = ''
"""
Axis which goes on the left-hand side
"""
self.right_axis_label = ''
"""
Axis which goes on the right-hand side
"""
self.weekly_plot_filename = ''
"""
Filename for weekly plot.
"""
self.monthly_plot_filename = ''
"""
Filename for monthly plot.
"""
self.title_string1 = ''
"""
Part of title string describing the total
"""
self.title_string2 = ''
"""
Part of title string describing the average
"""
self.data_file = ''
"""
Name of a CSV file with date, total, unique data.
"""
# Function which plots the two datasets on a single plot with two y axes.
def plot_data(self):
# Read the data from the CSV file. "|S11" refers to an 11 character string.
# When you specify the dtype argument, the data is read into a structured
# array and the columns can be accessed with the 'f0', 'f1', etc. names.
data = np.genfromtxt(self.data_file, delimiter=',', dtype=("|S11", int, int))
date_strings = data['f0']
data_column2 = data['f1']
data_column3 = data['f2']
# Convert date strings into numbers.
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%Y-%b-%d')))
# Initialize an array with 1, 7, 14, ...
N = len(date_strings)
week_indexes = range(0, N, 7)
# Get total views and average unique viewers for each week
weekly_column2 = []
weekly_column3 = []
x_axis = []
for i in range(0, len(week_indexes)-1):
start = week_indexes[i]
stop = week_indexes[i+1]
weekly_column2.append(sum(data_column2[start:stop]));
weekly_column3.append(np.mean(data_column3[start:stop]));
x_axis.append(date_nums[week_indexes[i]])
# Get a reference to the figure
fig = plt.figure()
# The colors used come from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
muted_dark_blue = u'#4878cf'
muted_green = u'#6acc65'
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax1 = fig.add_subplot(111)
ax1.plot(x_axis, weekly_column2, color=muted_dark_blue, marker='o', linestyle='-', markersize=3)
ax1.set_ylabel(self.left_axis_label + ' (blue circles)')
# Choose the number of labels to create, then use linspace to create
# them and convert them to ints.
n_labels = 4
x_axis_ticks = np.linspace(0, len(x_axis)-1, n_labels).astype(int)
# Set tick labels and positions
ax1.set_xticks([x_axis[i] for i in x_axis_ticks])
ax1.set_xticklabels(['Week of \n' + str(num2date(x_axis[i]).date()) for i in x_axis_ticks])
# Plot the weekly column 3 data.
ax2 = ax1.twinx()
ax2.plot(x_axis, weekly_column3, color=muted_green, marker='s', linestyle='-', markersize=3)
ax2.set_ylabel(self.right_axis_label + ' (green squares)')
# Add title
title_string = self.title_string1 \
+ ' ' \
+ str(sum(data_column2)) \
+ ', ' \
+ self.title_string2 \
+ ' ' \
+ '%.1f' % np.mean(data_column3)
fig.suptitle(title_string)
# Save as PDF
plt.savefig(self.weekly_plot_filename)
# Make monthly plot
fig.clf()
# Generate date numbers at monthly intervals starting from '2014-Feb-17'
now = datetime.now()
month_intervals = [735281] # date2num for '2014-Feb-17'
for yr in xrange(2014, now.year+1):
for mo in xrange(1, 13):
# Skip Jan 2014
if (yr==2014 and mo==1):
continue
# http://stackoverflow.com/questions/27814983/how-to-get-month-interval-using-datetime-in-python
# Add the number of days in the current month to the previous date num to
# get the next one.
month_intervals.append(month_intervals[-1] + calendar.monthrange(yr, mo)[1])
# Find the indexes of each date.
month_indexes = []
for date in month_intervals:
# Not all data sets have all of these dates, so we just use the
# ones we have.
if date in date_nums:
month_indexes.append(date_nums.index(date))
# Get total views and average unique viewers for each month
monthly_column2 = [];
monthly_column3 = [];
x_axis = []
for i in range(0, len(month_indexes)-1):
start = month_indexes[i]
stop = month_indexes[i+1]
monthly_column2.append(sum(data_column2[start:stop]));
monthly_column3.append(np.mean(data_column3[start:stop]));
x_axis.append(date_nums[month_indexes[i]])
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax1 = fig.add_subplot(111)
ax1.plot(x_axis, monthly_column2, color=muted_dark_blue, marker='o', linestyle='-')
ax1.set_ylabel(self.left_axis_label + ' (blue circles)')
# Place an x-axis tick mark every x_step months. As we get more data,
# we'll have to increase x_step.
x_step = 4
x_axis_ticks = range(0, len(x_axis), x_step)
# Set tick labels and positions
ax1.set_xticks([x_axis[i] for i in x_axis_ticks])
ax1.set_xticklabels([num2date(x_axis[i]).strftime('%b\n%Y') for i in x_axis_ticks])
# Plot the average weekly unique visitors
ax2 = ax1.twinx()
ax2.plot(x_axis, monthly_column3, color=muted_green, marker='s', linestyle='-')
ax2.set_ylabel(self.right_axis_label + ' (green squares)')
# Save as PDF
plt.savefig(self.monthly_plot_filename)
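# Usage sketch (hypothetical filenames and labels): a caller configures one PlotData
# instance per metric; data_file must contain rows of the form "2014-Feb-17,<total>,<unique>"
# as parsed in plot_data() above.
#   views = PlotData()
#   views.data_file = 'github_traffic_views.csv'
#   views.left_axis_label = 'Weekly page views'
#   views.right_axis_label = 'Avg. unique visitors'
#   views.title_string1 = 'Total views:'
#   views.title_string2 = 'avg. unique visitors:'
#   views.weekly_plot_filename = 'weekly_github_traffic.pdf'
#   views.monthly_plot_filename = 'monthly_github_traffic.pdf'
#   views.plot_data()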
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
peifu/tests | python/jira/jira_client.py | 1 | 4613 | #!/usr/bin/env python3
#coding: utf-8
from jira import JIRA
import re
import json
import prettytable as pt
import pandas as pd
import numpy as np
from io import StringIO
DEBUG_ENABLE = 0
MAX_ISSUE = 50
MAX_SUMMARY = 80
JIRA_SERVER = "cfg/jira_server.json"
JIRA_FILTER = "cfg/jira_filter.json"
JIRA_PATTERN = "project in ({}) AND priority in ({}) AND status in ({}) AND assignee in ({}) ORDER BY priority DESC, status ASC, assignee ASC"
FIELD_NAMES = ["Issue", "Issuetype", "Priority", "Status", "Assingee", "Creator", "Summary"]
FIELD_NAMES2 = ["Issue", "Priority", "Status", "Assingee", "Due Date", "Finish Date", "Summary"]
def debug(args):
if DEBUG_ENABLE == 1:
print(args)
def store_csv(filename, csv_str):
csv_file = open("filename", "w+")
csv_file.write(csv_str)
csv_file.close()
def init_config(cfg):
f = open(cfg, encoding="utf-8")
jira_config = json.load(f)
f.close()
debug(jira_config)
return jira_config
def init_config_with_json(cfg_json):
jira_config = json.loads(cfg_json)
debug(jira_config)
return jira_config
def init_jira(jira_config):
server = jira_config["server"]["server_addr"]
user = jira_config["server"]["user"]
pwd = jira_config["server"]["password"]
debug("server={}, user={}, pwd={}".format(server, user, pwd))
jira = JIRA({"server": server}, basic_auth=(user, pwd))
return jira
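# Expected shape of cfg/jira_server.json, inferred from init_jira() above (the values
# are placeholders):
#   {"server": {"server_addr": "https://jira.example.com",
#               "user": "<user>", "password": "<password>"}}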
def get_filters(cfg):
f = open(cfg, encoding="utf-8")
filter_json = json.load(f)
f.close()
return filter_json["filter"]
def get_dataframe_from_csv(csv_str):
df = pd.DataFrame(pd.read_csv(StringIO(csv_str)))
print(df)
#pg = df[df['Assignee'].isin(["Pengguang.Zhu", "wei.hong"])]
#print(pg)
def init_table():
tb = pt.PrettyTable()
tb.field_names = FIELD_NAMES2
tb.align = 'l'
return tb
def update_table(tb, issue):
item = [issue.key, issue.fields.issuetype.name, issue.fields.priority.name,
issue.fields.status.name, issue.fields.assignee.key, issue.fields.creator.key,
issue.fields.summary[0:MAX_SUMMARY]]
item2 = [issue.key, issue.fields.priority.name,
issue.fields.status.name, issue.fields.assignee.key,
issue.fields.duedate, issue.fields.customfield_11614,
issue.fields.summary[0:MAX_SUMMARY]]
tb.add_row(item2)
def dump_table(tb):
s = tb.get_string()
print(s)
def get_csv(tb):
return tb.get_csv_string()
def dump_issue(issue):
debug(dir(issue.fields))
debug('{}:{}'.format(issue.key, issue.fields.summary))
item = [issue.key, issue.fields.issuetype, issue.fields.priority, issue.fields.status,
issue.fields.assignee, issue.fields.creator, issue.fields.summary]
debug(item)
item2 = [issue.fields.customfield_11614]
debug(item2)
def query_issues(jira, pattern, count):
issues = jira.search_issues(pattern, maxResults=count)
return issues
def get_issues_by_pattern(jira, pattern):
tb = init_table()
debug(pattern)
issues = query_issues(jira, pattern, MAX_ISSUE)
for issue in issues:
update_table(tb, issue)
# debug issue
dump_issue(issue)
break
return tb
def get_issues_by_filter(jira, filter):
tb = init_table()
pattern = JIRA_PATTERN.format(filter["project"], filter["priority"], filter["status"], filter["assignee"])
debug(pattern)
issues = query_issues(jira, pattern, MAX_ISSUE)
for issue in issues:
update_table(tb, issue)
# debug issue
dump_issue(issue)
return tb
def get_jira_csv(filter):
jira_config = init_config(JIRA_SERVER)
jira = init_jira(jira_config)
tb = get_issues_by_filter(jira, filter)
csv = tb.get_csv_string()
return csv
def get_jira_html(filter):
jira_config = init_config(JIRA_SERVER)
jira = init_jira(jira_config)
tb = get_issues_by_filter(jira, filter)
csv = tb.get_csv_string()
df = pd.DataFrame(pd.read_csv(StringIO(csv)))
html = df.to_html(index=False)
return html
def get_jira_table(filter):
jira_config = init_config(JIRA_SERVER)
jira = init_jira(jira_config)
tb = get_issues_by_filter(jira, filter)
s = tb.get_string()
return s
def test_jira_html():
filters = get_filters(JIRA_FILTER)
for filter in filters[0:1]:
s = get_jira_html(filter)
print(filter["name"])
print(s)
def test_jira_table():
filters = get_filters(JIRA_FILTER)
for filter in filters[0:1]:
s = get_jira_table(filter)
print(filter["name"])
print(s)
if __name__ == "__main__":
test_jira_html()
test_jira_table()
| gpl-2.0 |
timy/dm_spec | ana/mpi_spec_1d/plot_orien.py | 1 | 1634 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations
n_esmb = 100000
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
data = np.loadtxt("res/euler.dat")
#draw cube
# r = [-1, 1]
# for s, e in combinations(np.array(list(product(r, r, r))), 2):
# if np.sum(np.abs(s - e)) == (r[1] - r[0]):
# ax.plot3D(*zip(s, e), color="b")
#draw sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u) * np.sin(v)
y = np.sin(u) * np.sin(v)
z = np.cos(v)
ax.plot_wireframe(x, y, z, color="r")
#draw a point
ax.scatter([0],[0],[0],color="g",s=100)
#draw a vector
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
phi = data[:,0]
theta = data[:,1]
psi = data[:,2]
x = np.sin(theta) * np.sin(psi);
y = np.sin(theta) * np.cos(psi);
z = np.cos(theta);
for i_esmb in range(n_esmb):
a = Arrow3D( [0, x[i_esmb]], [0, y[i_esmb]], [0, z[i_esmb]],
mutation_scale=20, lw=1, arrowstyle="-|>",
color=plt.cm.RdYlBu(i_esmb) )
ax.add_artist(a)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
| mit |
MSeifert04/astropy | astropy/visualization/wcsaxes/tests/test_wcsapi.py | 3 | 15461 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from textwrap import dedent
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy.io import fits
from astropy import units as u
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.units import Quantity
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from astropy.wcs import WCS
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (WCSWorld2PixelTransform,
transform_coord_meta_from_wcs,
apply_slices)
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ['x', 'y']
WCS2D.wcs.cunit = ['km', 'km']
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0., 0.]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ['x', 'y', 'z']
WCS3D.wcs.cunit = ['km', 'km', 'km']
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0., 0., 1.]
@pytest.fixture
def wcs_4d():
header = dedent("""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
CTYPE3 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CTYPE4 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
""")
return WCS(header=fits.Header.fromstring(header, sep='\n'))
@pytest.fixture
def cube_wcs():
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
cube_header = os.path.join(data_dir, 'cube_header')
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, having a Composite
# transform can end up in more strict requirements for the dimensionality.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(cube_wcs, RectangularFrame,
slices=(50, 'y', 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ['l', 'r', 'b']
assert ticklabel_position == ['l', 'r', 'b']
assert ticks_position == ['l', 'r', 'b']
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['GLON-TAN', 'GLAT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ['Longitude', '']
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['default_axis_label'] == ['Longitude', 'pos.galactic.lat']
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.arcsec, u.arcsec]
assert coord_meta['wrap'] == [180., None]
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame,
slices=('y', 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes should be swapped because of slices
assert axislabel_position == ['l', 'b']
assert ticklabel_position == ['l', 'b']
assert ticks_position == ['bltr', 'bltr']
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['HGLN-TAN', 'HGLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [180., None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['CRLN-TAN', 'CRLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [360., None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.hourangle, u.deg]
assert coord_meta['wrap'] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['spam', 'spam']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['scalar', 'scalar']
assert coord_meta['format_unit'] == [u.one, u.one]
assert coord_meta['wrap'] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['WAVE']
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta['type'] == ['scalar']
assert coord_meta['format_unit'] == [u.m]
assert coord_meta['wrap'] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['GLON-TAN', 'GLAT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D, slices=('x', 0))
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['visible'] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['WAVE', 'UTC']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ['nm', 's']
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D, slices=('x', 0))
assert coord_meta['type'] == ['scalar', 'scalar']
assert coord_meta['format_unit'] == [u.m, u.s]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['visible'] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(wcs_4d, RectangularFrame, slices=(0, 0, 'x', 'y'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
assert axislabel_position == ['', '', 'b', 'l']
assert ticklabel_position == ['', '', 'b', 'l']
assert ticks_position == ['', '', 'bltr', 'bltr']
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(wcs_4d, RectangularFrame1D, slices=(0, 0, 0, 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ['', '', 't', 'b']
assert ticklabel_position == ['', '', 't', 'b']
assert ticks_position == ['', '', 't', 'b']
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
@pytest.mark.parametrize(("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0,0,'x','y'], (2, 3), 2),
(np.s_[...], [0,'x',0,'y'], (1, 2, 3), 3),
(np.s_[...], ['x',0,0,'y'], (0, 2, 3), 3),
(np.s_[...], ['x','y',0,0], (0, 1), 2),
(np.s_[:,:,0,:], [0, 'x', 'y'], (1, 2), 2),
(np.s_[:,:,0,:], ['x', 0, 'y'], (0, 1, 2), 3),
(np.s_[:,:,0,:], ['x', 'y', 0], (0, 1, 2), 3),
(np.s_[:,0,:,:], ['x', 'y', 0], (0, 1), 2),
])
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:,:,0,:]])
def test_sliced_ND_input(sub_wcs, wcs_slice):
slices_wcsaxes = [0, 'x', 'y']
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(sub_wcs, RectangularFrame, slices=slices_wcsaxes)
assert all(len(x) == 3 for x in coord_meta.values())
coord_meta['name'] = ['time', 'custom:pos.helioprojective.lat', 'custom:pos.helioprojective.lon']
coord_meta['type'] = ['scalar', 'latitude', 'longitude']
coord_meta['wrap'] = [None, None, 180.0]
coord_meta['unit'] = [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
coord_meta['visible'] = [False, True, True]
coord_meta['format_unit'] = [u.Unit("min"), u.Unit("arcsec"), u.Unit("arcsec")]
coord_meta['default_axislabel_position'] = ['', 'b', 't']
coord_meta['default_ticklabel_position'] = ['', 'b', 't']
coord_meta['default_ticks_position'] = ['', 'btlr', 'btlr']
# Validate the axes initialize correctly
plt.subplot(projection=sub_wcs, slices=slices_wcsaxes)
plt.close('all')
class LowLevelWCS5D(BaseLowLevelWCS):
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return ['em.freq', 'time', 'pos.eq.ra', 'pos.eq.dec', 'phys.polarization.stokes']
@property
def world_axis_units(self):
return ['Hz', 'day', 'deg', 'deg', '']
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [np.asarray(pix) * scale for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])]
def array_index_to_world_values(self, *index_arrays):
return self.pixel_to_world_values(index_arrays[::-1])[::-1]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])]
def world_to_array_index_values(self, *world_arrays):
return np.round(self.world_to_pixel_values(world_arrays[::-1])[::-1]).astype(int)
@property
def world_axis_object_components(self):
return [('freq', 0, 'value'),
('time', 0, 'mjd'),
('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree'),
('stokes', 0, 'value')]
@property
def world_axis_object_classes(self):
return {'celestial': (SkyCoord, (), {'unit': 'deg'}),
'time': (Time, (), {'format': 'mjd'}),
'freq': (Quantity, (), {'unit': 'Hz'}),
'stokes': (Quantity, (), {'unit': 'one'})}
class TestWCSAPI:
def teardown_method(self, method):
plt.close('all')
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_wcsapi_5d(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
| bsd-3-clause |
skjerns/AutoSleepScorerDev | plotting.py | 1 | 13645 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 17:10:40 2017
@author: Simon
"""
import matplotlib.pyplot as plt
import pickle
import numpy as np
import seaborn as sns
import re
import sdill as dill
from sklearn.metrics import f1_score
import tools
cmap = sns.cubehelix_palette(8, start=2.8, rot=-.1, as_cmap=True)
fsize = np.array((4,3.5))
#%%
pkl = pickle.load(open('.\\results\\results_recurrent_', 'rb'))
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
for key in pkl:
confmat = [x[4] for x in pkl[key]]
tools.plot_confusion_matrix ('dataset_'+ key + '.png',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title=key)
pkl = pickle.load(open('.\\results\\results_recurrent_seqlen-rnn6.pkl', 'rb'))
#pkl['feat-LSTM'] = pkl.pop('pure_rnn_do_6')
confmat = [x[4] for x in pkl[list(pkl.keys())[0]]]
tools.plot_confusion_matrix ('recurrent_'+ key +'_newest.png',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='Feat-LSTM')
pkl = pickle.load(open('.\\results_recurrent_morel2.pkl', 'rb'))
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
for key in pkl:
confmat = pkl[key][4]
# plt.close('all')
tools.plot_confusion_matrix ('transfer_'+ key + '.png',confmat, t_label,figsize=fsize, perc=True,cmap=cmap, title=key)
#%% electrodes best plot
pkl = pickle.load(open('.\\results\\results_electrodes_morel2.pkl', 'rb'))
pkl.update(pickle.load(open('.\\results\\new_results_electrodes_feat.pkl', 'rb')))
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
confmat = [x[4] for x in pkl['annall']]
tools.plot_confusion_matrix ('confmat_ann.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, cbar=False)
confmat = [x[4] for x in pkl['cnn3morel2 all']]
tools.plot_confusion_matrix ('confmat_cnn.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap)
#%% Differenceplot
pkl = pickle.load(open('.\\results\\results_electrodes_morel2.pkl', 'rb'))
pkl.update(pickle.load(open('.\\results\\new_results_electrodes_feat.pkl', 'rb')))
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
cnn_eeg = np.mean([x[4] for x in pkl['cnn3morel2 eeg']], 0)
cnn_eog = np.mean([x[4] for x in pkl['cnn3morel2 eog']], 0)
cnn_emg = np.mean([x[4] for x in pkl['cnn3morel2 emg']], 0)
cnn_all = np.mean([x[4] for x in pkl['cnn3morel2 all']], 0)
ann_eeg = np.mean([x[4] for x in pkl['anneeg']], 0)
ann_eog = np.mean([x[4] for x in pkl['anneeg+eog']], 0)
ann_emg = np.mean([x[4] for x in pkl['anneeg+emg']], 0)
ann_all = np.mean([x[4] for x in pkl['annall']], 0)
tools.plot_difference_matrix('cnn_eeg.pdf', cnn_eeg, cnn_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG')
tools.plot_difference_matrix('cnn_eog.pdf', cnn_eog, cnn_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG+EOG')
tools.plot_difference_matrix('cnn_emg.pdf', cnn_emg, cnn_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG+EMG')
tools.plot_difference_matrix('ann_eeg.pdf', ann_eeg, ann_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG', cbar=False)
tools.plot_difference_matrix('ann_eog.pdf', ann_eog, ann_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG+EOG', cbar=False)
tools.plot_difference_matrix('ann_emg.pdf', ann_emg, ann_all, t_label,figsize=fsize, perc=True, title='EEG+EMG+EOG minus EEG+EMG', cbar=False)
#%%
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
pkl = pickle.load(open('.\\results\\new_results_recurrent.pkl', 'rb'))
rec_ann = np.mean([x[4] for x in pkl['pure_rnn_do']], 0)
pkl = pickle.load(open('.\\results\\results_recurrent_morel2.pkl', 'rb'))
rec_cnn = np.mean([x[4] for x in pkl['LSTM moreL2_fc1']], 0)
tools.plot_confusion_matrix ('conf_feat-lstm.pdf', rec_ann, t_label,figsize=fsize, perc=True, cmap=cmap, title='', cbar=False)
tools.plot_confusion_matrix ('conf_cnn+lstm.pdf', rec_cnn, t_label,figsize=fsize, perc=True, cmap=cmap, title='')
tools.plot_difference_matrix('diff-cnn-lstm.pdf', rec_ann, rec_cnn , t_label,figsize=fsize, perc=True, title='')
plt.close('all')
#for key in pkl:
# confmat = pkl[key]]
# plt.close('all')
#
# tools.plot_confusion_matrix ('recurrent_'+ key + '.eps',np.mean(confmat,0), target, perc=True)
#%%
pkl = pickle.load(open('.\\results\\results_recurrent_emsa', 'rb'))
confmat = [x[4] for x in pkl[list(pkl)[0]]]
tools.plot_confusion_matrix ('dataset_'+ key + '.png',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='EMSA')
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
for key in pkl:
confmat = pkl[key][4]
tools.plot_confusion_matrix ('dataset_'+ key + '.png',confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title=key)
#%% Datasets
pkl = pickle.load(open('.\\results\\results_recurrent_emsa', 'rb'))
confmat = [x[4] for x in pkl[list(pkl)[1]]]
tools.plot_confusion_matrix ('dataset_emsaad.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='EMSAad')
pkl = pickle.load(open('.\\results\\results_dataset_emsach.pkl', 'rb'))
confmat = [x[4] for x in pkl[list(pkl)[1]]]
tools.plot_confusion_matrix ('dataset_emsach.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='EMSAch')
pkl = pickle.load(open('.\\results\\results_recurrent_edfx', 'rb'))
confmat = [x[4] for x in pkl[list(pkl)[1]]]
tools.plot_confusion_matrix ('dataset_edfx.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='Sleep-EDFx')
pkl = pickle.load(open('.\\results\\results_recurrent_vinc', 'rb'))
confmat = [x[4] for x in pkl[list(pkl)[1]]]
tools.plot_confusion_matrix ('dataset_vinc.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='UCD')
#pkl = pickle.load(open('.\\results\\results_recurrent_cshs100', 'rb'))
#confmat = [x[4] for x in pkl[list(pkl)[0]]]
#tools.plot_confusion_matrix ('dataset_cshs100.pdf',np.mean(confmat,0), t_label,figsize=fsize, perc=True, cmap=cmap, title='CCSHS100')
#%% Transfer confmats
pkl = pickle.load(open('.\\results_transfer_cshs50_cshs50', 'rb'))
t_label = ['W', 'S1', 'S2', 'SWS', 'REM']
key = 'cshs100'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='CCSHS100')
key = 'edfx'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='Sleep-EDFx')
key = 'emsaad'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='EMSAad')
key = 'emsach'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='EMSAch')
key = 'vinc'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='UCD')
key = 'vinc_scaled'
confmat = pkl[key][4]
tools.plot_confusion_matrix ('transfer_cshs50_{}.pdf'.format(key),confmat, t_label,figsize=fsize, perc=True, cmap=cmap, title='UCD z-scored')
#%% hypnograms
pkl = pickle.load(open('.\\results_transfer_cshs50_cshs50', 'rb'))
pred, targ, _ = pkl['cshs100'][5]
targ = np.roll(targ,4)
sub_pred = pred[5390:6560]
sub_targ = targ[5390:6560]
acc = np.mean(sub_targ==sub_pred)
f1 = f1_score(sub_targ, sub_pred, average='macro')
plt.figure(figsize=[8,3])
ax = plt.subplot(111)
tools.plot_hypnogram(sub_pred, c ='orangered', title='Ground Truth',ax1=ax)
tools.plot_hypnogram(sub_targ, c='royalblue', title='Prediction', ax1=ax, linewidth=1.9)
plt.legend(['Human Scorer','CNN+LSTM'], loc='lower right')
#plt.savefig('./plots/hypnogram_truth.pdf')
plt.savefig('./plots/hypnogram_prediction.pdf')
#%% distribution of predictions
preds = dill.load('predictions.pkl')
prob = preds['cnn_pred']
targ = preds['cnn_target']
pred = np.argmax(prob,1)
sorted_pred = np.fliplr(prob.argsort())
#overall = np.mean((sorted_pred[:,0]==targ) | (sorted_pred[:,1]==targ) )
#correct = np.max(prob[targ==pred],1)
#wrong = np.max(prob[targ!=pred],1)
#
#plt.subplot(1,2,1)
#sns.distplot(correct,bins=200);
#plt.title('Correct')
#plt.subplot(1,2,2)
#sns.distplot(wrong);
#plt.title('Wrong')
#
for i in range(5):
idx = pred==i
stage_prob = prob[idx]
stage_pred = pred[idx]
stage_targ = targ[idx]
correct = np.max(stage_prob[stage_targ==stage_pred],1)
wrong = np.max(stage_prob[stage_targ!=stage_pred],1)
plt.subplot(3,2,i+1)
sns.distplot(correct, hist=True, kde=False, bins=20);
plt.ylim([0,400])
plt.xlim([0,1])
# plt.subplot(5,2,i*2+2)
sns.distplot(wrong, hist=True, kde=False, color='r', bins=20);
plt.title(' Stage {}'.format(i))
plt.legend(['True', 'Predicted'])
plt.ylim([0,400])
plt.xlim([0,1])
#%% Plots for presentation cnn
plt.figure(figsize=[8,3])
test_acc = np.array([0.837, 0.850, 0.842, 0.848])
test_acc_min = np.array([0.820, 0.824, 0.811, 0.818])
test_acc_max = np.array([0.850, 0.872, 0.873, 0.869])
plt.plot(test_acc, 'bo')
plt.errorbar(np.arange(4),test_acc , [test_acc - test_acc_min, test_acc_max - test_acc],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test Accuracy')
plt.xticks(np.arange(4), ['EEG','EEG+EOG','EEG+EMG','All'])
test_f1 = np.array([0.710, 0.734, 0.732, 0.743])
test_f1_min = np.array([0.691, 0.706, 0.710, 0.721])
test_f1_max = np.array([0.736, 0.764, 0.761, 0.760])
plt.figure(figsize=[8,3])
plt.plot(test_f1, 'go')
plt.errorbar(np.arange(4),test_f1 , [test_f1 - test_f1_min, test_f1_max - test_f1],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test F1-score')
plt.xticks(np.arange(4), ['EEG','EEG+EOG','EEG+EMG','All'])
#%% Plots for presentation rnn
plt.figure(figsize=[5,3])
rec_acc = np.array([0.848, 0.856])
rec_acc_min = np.array([0.818, 0.836])
rec_acc_max = np.array([0.869, 0.876])
plt.plot(rec_acc[0], 'bo')
plt.errorbar([0] ,rec_acc[:1] , [rec_acc[:1] - rec_acc_min[:1], rec_acc_max[:1] - rec_acc[:1]],
fmt='.k', ecolor='gray', lw=1)
plt.plot(1,rec_acc[1], 'go')
plt.errorbar([1] ,rec_acc[1:] , [rec_acc[1:] - rec_acc_min[1:], rec_acc_max[1:] - rec_acc[1:]],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test Accuracy')
plt.xticks(np.arange(2), ['CNN','CNN+LSTM'])
plt.xlim([-1,2])
plt.figure(figsize=[5,3])
rec_f1 = np.array([0.743,0.764])
rec_f1_min = np.array([0.721,0.740])
rec_f1_max = np.array([0.760,0.785])
plt.plot(rec_f1[0], 'bo')
plt.errorbar(0,rec_f1[:1] , [rec_f1[:1] - rec_f1_min[:1], rec_f1_max[:1] - rec_f1[:1]],
fmt='.k', ecolor='gray', lw=1)
plt.plot(1,rec_f1[1], 'go')
plt.errorbar(1,rec_f1[1:] , [rec_f1[1:] - rec_f1_min[1:], rec_f1_max[1:] - rec_f1[1:]],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test F1-score')
plt.xticks(np.arange(2), ['CNN','CNN+LSTM'])
plt.xlim([-1,2])
#%% plotting hand vs automatic
feat_acc = np.array([0.812, 0.834, 0.836, 0.847])
feat_acc_min = np.array([0.772, 0.805, 0.806, 0.823])
feat_acc_max = np.array([0.832, 0.853, 0.857, 0.863 ])
feat_f1 = np.array([0.647, 0.677, 0.714 , 0.730 ])
feat_f1_min = np.array([0.617, 0.648, 0.687, 0.706])
feat_f1_max = np.array([0.667, 0.695, 0.739, 0.754])
rec_acc = np.array([0.856])
rec_acc_min = np.array([0.836])
rec_acc_max = np.array([0.876])
rec_f1 = np.array([0.764])
rec_f1_min = np.array([0.740])
rec_f1_max = np.array([0.785])
frec_acc = np.array([0.853])
frec_acc_min = np.array([0.815])
frec_acc_max = np.array([0.883])
frec_f1 = np.array([0.754])
frec_f1_min = np.array([0.713])
frec_f1_max = np.array([0.783])
plt.figure(figsize=[6,3])
plt.plot(feat_acc, 'go')
plt.errorbar(np.arange(4),feat_acc , [feat_acc - feat_acc_min, feat_acc_max - feat_acc], fmt='.k', ecolor='gray', lw=1)
plt.plot(np.arange(4)+0.2, test_acc, 'bo')
plt.errorbar(np.arange(4)+0.2, test_acc , [test_acc - test_acc_min, test_acc_max - test_acc], fmt='.k', ecolor='gray', lw=1)
plt.title('Test Accuracy')
plt.legend(['Handcrafted with ANN','Automatic with CNN'] ,loc=4 )
plt.xticks(np.arange(4), ['EEG','EEG+EOG','EEG+EMG','All'])
plt.figure(figsize=[6,3])
plt.plot(feat_f1, 'go')
plt.errorbar(np.arange(4),feat_f1 , [feat_f1 - feat_f1_min, feat_f1_max - feat_f1], fmt='.k', ecolor='gray', lw=1)
plt.plot(np.arange(4)+0.2, test_f1, 'bo')
plt.errorbar(np.arange(4)+0.2, test_f1 , [test_f1 - test_f1_min, test_f1_max - test_f1], fmt='.k', ecolor='gray', lw=1)
plt.title('Test F1')
plt.legend(['Handcrafted with ANN','Automatic with CNN'] ,loc=4 )
plt.xticks(np.arange(4), ['EEG','EEG+EOG','EEG+EMG','All'])
plt.figure(figsize=[5,3])
plt.plot(frec_acc, 'go')
plt.errorbar(np.arange(1),frec_acc , [frec_acc - frec_acc_min, frec_acc_max - frec_acc],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test Accuracy')
plt.xlim([-1,2])
plt.plot(0.2,rec_acc, 'bo')
plt.errorbar(np.arange(1)+0.2,rec_acc , [rec_acc - rec_acc_min, rec_acc_max - rec_acc],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test Accuracy Temporal')
plt.legend(['Handcrafted with RNN','Automatic with CNN+LSTM'] ,loc=4 )
plt.xticks(np.arange(1), ['All'])
plt.xlim([-1,2])
plt.figure(figsize=[5,3])
plt.plot(frec_f1, 'go')
plt.errorbar(np.arange(1),frec_f1 , [frec_f1 - frec_f1_min, frec_f1_max - frec_f1],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test F1 Temporal')
plt.xlim([-1,2])
plt.plot(0.2,rec_f1, 'bo')
plt.errorbar(np.arange(1)+0.2,rec_f1 , [rec_f1 - rec_f1_min, rec_f1_max - rec_f1],
fmt='.k', ecolor='gray', lw=1)
plt.title('Test F1 Temporal')
plt.legend(['Handcrafted with RNN','Automatic with CNN+LSTM'] ,loc=4 )
plt.xticks(np.arange(1), ['All'])
plt.xlim([-1,2]) | gpl-3.0 |
hrjn/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 13 | 26703 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
def test_ridge_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
param_grid = product((1.0, 1e-2), (True, False),
('svd', 'cholesky', 'lsqr', 'sparse_cg'))
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for (alpha, intercept, solver) in param_grid:
# Ridge with explicit sample_weight
est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
coefs = est.coef_
inter = est.intercept_
# Closed form of the weighted regularized least square
# theta = (X^T W X + alpha I)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
I = np.eye(n_features)
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
I = np.eye(n_features + 1)
I[0, 0] = 0
cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs, cf_coefs)
else:
assert_array_almost_equal(coefs, cf_coefs[1:])
assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
reg = Ridge(alpha=0.0)
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(reg.predict(X_test), [1., 2, 3, 4])
assert_equal(len(reg.coef_.shape), 1)
assert_equal(type(reg.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(reg.coef_.shape), 2)
assert_equal(type(reg.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
fit_intercept = filter_ == DENSE_FILTER
if fit_intercept:
X_diabetes_ = X_diabetes - X_diabetes.mean(0)
else:
X_diabetes_ = X_diabetes
ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)
ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept)
# because fit_intercept is applied
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes, fit_intercept)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes_[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes_[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes_, y_diabetes, fit_intercept)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('neg_mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for reg in (RidgeClassifier(), RidgeClassifierCV()):
reg.fit(filter_(X_iris), y_iris)
assert_equal(reg.coef_.shape, (n_classes, n_features))
y_pred = reg.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
cv = KFold(5)
reg = RidgeClassifierCV(cv=cv)
reg.fit(filter_(X_iris), y_iris)
y_pred = reg.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def check_dense_sparse(test_func):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
yield check_dense_sparse, test_func
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
reg = RidgeClassifier(class_weight={1: 0.001})
reg.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
reg = RidgeClassifier(class_weight='balanced')
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
rega = RidgeClassifier(class_weight='balanced')
rega.fit(X, y)
assert_equal(len(rega.classes_), 2)
assert_array_almost_equal(reg.coef_, rega.coef_)
assert_array_almost_equal(reg.intercept_, rega.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for reg in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
reg1 = reg()
reg1.fit(iris.data, iris.target)
reg2 = reg(class_weight='balanced')
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Check that sample_weight and class_weight are multiplicative
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight ** 2)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(reg1.coef_, reg2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
reg.fit(X, y)
# we give a small weight to class 1
reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
reg.fit(X, y)
assert_array_equal(reg.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
cv = KFold(5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
gs = GridSearchCV(Ridge(), parameters, cv=cv)
gs.fit(X, y, sample_weight=sample_weight)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
n = 5
y = rng.randn(n)
v = rng.randn(n)
Q = rng.randn(len(v), len(v))
QT_y = Q.T.dot(y)
G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
for n, p in zip((5, 10), (12, 6)):
y = rng.randn(n)
v = rng.randn(p)
U = rng.randn(n, p)
UT_y = U.T.dot(y)
G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
X, y = make_multilabel_classification(n_samples=10, random_state=0)
assert_raises(ValueError, RidgeClassifier().fit, X, y)
| bsd-3-clause |
alisaad05/cimpy | cimpy.py | 1 | 13680 | #!/usr/bin/env python
#import math
# import sys
import numpy as np
import matplotlib.pyplot as plt
import mayavi.mlab as ml # to use mayavi mlab scripting tools and rendering
from mpl_toolkits.mplot3d import Axes3D #to use Scatter/Scatter 3D
from tvtk.api import tvtk
# import vtk
# All returned arrays are cast into either numpy or numarray arrays
#arr=numpy.array
# ALI: "self" in python is equivalent to "this" in C++
# TODO : implement class create_grid(filename) to link with Struct_mesh_generator.cpp ==> possible use of ctypes wrapping ??
# TODO: check doctest to parse unit tests in docstrings and run them ... can replace the "tutoriaux_testes" in cimlib
# TODO: https://docs.python.org/2/library/doctest.html
#HINT: to parse a **kwargs input, one can use "has_key"
class Tmesh:
"""Mesh object containing nodes and elements, as well connectivity and dimension information"""
def __init__(self, filename = None):
"""Creates a Tmesh object by reading the specified file."""
if filename is None:
print "CIMPY:: The mesh filename is not specified. Please input a valid mesh name with a .t extension"
self.name = "Maillage1"
else:
if filename[-2:].lower() == ".t":
print "CIMPY:: Mesh filename is valid and ready for reading ..."
self.name = filename.split()[0]
# self.point_array = [] # TODO: obsolete ... replaced by dictionary
self.nodes_dict = {} # or =dict()
self.elements_dict = {} # or =dict()
# self.element_array = [] # TODO: obsolete ... replaced by dictionary
self.Parse_input_file(filename)
elif filename[-4:].lower() == ".msh":
print "CIMPY:: Coming soon: convert " + filename + " to a cimlib compatible mesh format (Tmesh) ? "
# TODO link with gmsh2mtc.py
else:
raise Exception("ERROR: The extension of " + filename + " filename is not recognised")
def unittest(self):
""" Not implemented yet """
print "My name is: " + self.name
def Parse_input_file(self, filename):
""" takes a .t mesh filename and read it into a Tmesh datastructure """
f = open(filename)
header = f.readline().strip().split()
# TODO: add condition: if len(header) != 4 then ...
self.nbNodes, self.dimNode,self.nbElements, self.dimElement = header
self.nbNodes = int(self.nbNodes)
self.nbElements = int(self.nbElements)
self.dimElement = int(self.dimElement)
self.dimNode = int(self.dimNode)
for i in xrange(self.nbNodes):
node = f.readline().strip().split()
self.nodes_dict[i+1] = map(float, node)
# A fictitious node should be considered (but NOT USED FOR MESH VIEW)
if self.dimNode == 2:
self.nodes_dict[0]=[0,0]
elif self.dimNode == 3:
self.nodes_dict[0]=[0,0,0]
else : raise Exception ("dimNode ("+ str(self.dimNode) + ") should be 2 or 3")
# nodes_dict is a map of point IDs to their coordinates
# To access the coordinates of a node: nodes_dict[node_id]
# To access a component in the coord: nodes_dict[node_id][component]
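# e.g. self.nodes_dict[3][1] -> y coordinate of node 3 (real node IDs start at 1; key 0 is the fictitious node)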
line = f.readline().strip().split()
#
while len(line) == 0 : line = f.readline().strip().split()
connectivity = line # first non blank line
for i in xrange(self.nbElements):
self.elements_dict[i+1] = map(int, connectivity)
connectivity = f.readline().strip().split()
f.close()
def View(self, engine = "mpl" , **kwargs):
"""
Plots the mesh in a figure. Supports 2D and 3D meshes with two visualization backends. Call signature ::\n
Tmesh.View()
- engine:
- `mpl` (default) to use matplotlib as the 2D and 3D scatter plotting backend
- `myv` to use Mayavi for 2D and 3D mesh rendering (using `tvtk`)
- kwargs:
- if 'engine' argument is set to `mpl` then Matplotlib's keywords are used:
- linestyle or ls : '-' , '--' , '-.' ...
- linewidth or lw float value in points
- marker: 'x' , 'o' , 'v' ...
- color: 'r' (red), 'k' (black) , 'b' (blue) ...
- [check url: http://matplotlib.org/1.3.1/api/artist_api.html#module-matplotlib.lines] \n\n
- if 'engine' argument is set to "myv" then Mayavi's keywords are used:
- color: the color of the vtk object. Overrides the colormap, if any, when specified. This is specified as a triplet of floats ranging from 0 to 1, eg (1, 1, 1) for white.
- colormap: type of colormap to use.
- extent: [xmin, xmax, ymin, ymax, zmin, zmax] Default is the x, y, z arrays extent. Use this to change the extent of the object created.
"""
dim = self.dimNode
nb_points_real = self.GetNumberOfPoints()
nb_points = nb_points_real + 1 # the fictitious node of index 0 and coordinates [0,0]
### a dict constructor is necessary so that the class's member
### dictionary wont be affected when fictitious triangles are suppressed
real_elements_dict = dict(self.elements_dict)
# the 'enumerate' function is really nice !
for i,connec_list in enumerate(real_elements_dict.values()):
if 0 in connec_list : del real_elements_dict[i+1]
# i+1 because enumerate starts at 0 while my dict starts at 1
# The lists have to be cast into numpy arrays before using the TRI plot
points = np.asarray(self.nodes_dict.values())
simplexes = np.asarray(real_elements_dict.values())
# Extracting columns
x = points[:,0]
y = points[:,1]
if dim == 2:
# points_2d is used in mayavi rendering
points_2d = np.column_stack([points, np.zeros((nb_points,1))])
elif dim == 3:
# z is used in matplotlib rendering
z = points[:,2]
# points_3d is used in mayavi rendering
points_3d = points
if engine == "mpl":
if dim == 2:
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(x, y, simplexes, **kwargs)
plt.title('Mesh')
plt.show()
elif dim == 3:
# TODO: add line connection between scattered points
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', aspect ='equal')
ax.scatter3D(x, y, z, **kwargs)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.axis()
plt.show()
elif engine == "myv":
if dim == 2:
#this way works
# z = np.zeros_like(x)
# triangles = []
# for trigl in real_elements_dict.values(): triangles.append(tuple(trigl))
# ml.triangular_mesh(x, y, z, triangles, representation = 'wireframe')
#another way
simplex_type = 5 # VTK cell type for a triangle (a tetrahedron would be 10)
ug = tvtk.UnstructuredGrid(points= points_2d)
if dim == 3:
simplex_type = tvtk.Tetra().cell_type # = 10 for tetrahedron
ug = tvtk.UnstructuredGrid(points= points_3d)
ug.set_cells(simplex_type, simplexes)
fig = ml.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), figure=ug.class_name[3:])
surf = ml.pipeline.surface(ug, opacity=0.01)
RGB= (0,0,0.5)
#edge_color = tuple(i/255 for i in RGB)
ml.pipeline.surface(ml.pipeline.extract_edges(surf), color= RGB )
### Getters ###
def GetNumberOfPoints(self):
""" returns the total number of nodes in the mesh """
return self.nbNodes
def GetNumberOfElements(self):
""" returns the total number of elements in the mesh """
return self.nbElements
def GetMeshPointDimension(self):
""" returns the mesh dimension 2D or 3D """
return self.dimNode
def GetMeshElementDimension(self):
""" returns the number of nodes in a single element """
return self.dimElement
def GetPointCoordinates(self, point_id):
""" returns a node coordinates from the index """
return self.nodes_dict[point_id]
def NodesToFile(self, filename= None):
""" returns an array containing the nodes coordinates """
if filename is None:
print "CIMPY:: The output filename is not specified ... aborting"
else:
g = open(filename, "w")
for id in range(1,self.nbNodes+1):
x = str(self.nodes_dict[id][0])
y = str(self.nodes_dict[id][1])
if self.dimNode == 3: z = str(self.nodes_dict[id][2])
else: z=""
g.write(x + "\t" + y + "\t" + z)
if id < self.nbNodes : g.write("\n")
g.close()
def ElementsToFile(self, filename= None):
""" returns an array containing the triangles / connectivity"""
if filename is None:
print "CIMPY:: The output filename is not specified ... aborting"
else:
g = open(filename, "w")
for id in range(1,self.nbElements+1):
s= ""
for node in self.elements_dict[id]:
s += str(node)+ "\t"
if self.dimElement == 4: s += str(self.elements_dict[id][4])
g.write(s)
if id < self.nbElements : g.write("\n")
g.close()
# def GetArrayOfConnectivity(self, filename= None):
# """ returns an array containing the elements nodes indices """
# connec_list = []
# for element in self.element_array:
# connec_list.append(element.connectivity)
#
# if filename is not None:
# g = open(filename, "w")
# for element in self.element_array:
# g.write(element.GetConnectivityString(self.dimNode))
# g.close()
#
# return connec_list
def GetElement(self, element_id):
""" returns the element from the index"""
""" not implemented """
return element_id
#==================================================
#==================================================
class Tnode:
"""Point object with coordinates and index information"""
def __init__(self, point_id, point_coords):
""" Initializes the point (node) by its index and coordinates as arguments """
self.index = point_id
# mapping is necessary to convert string list to float coordinates
self.coordinates = map(float,point_coords)
# def GetCoordinateString(self, dimension):
# if dimension == 2:
# return str(self.coordinates[0]) + str("\t") + str(self.coordinates[1]) + str("\n")
# elif dimension == 3:
# return str(self.coordinates[0]) + str("\t") + str(self.coordinates[1]) + str("\t") + str(self.coordinates[2]) + str("\n")
#
#==================================================
#==================================================
class Telement:
"""Element object with node indices and element index information"""
def __init__(self, element_id, element_connectivity, input_mesh):
""" Initializes the element by its index and the indices of the its nodes as arguments """
# element_id is the index of the element
self.index = element_id
# connectivity is a list of indices (3 in 2D, 4 in 3D) of the triangle/tetrahedron nodes
self.connectivity = map(int, element_connectivity)
self.dimension = input_mesh.GetMeshElementDimension()
# each element should know the coordinates of its own nodes
if self.dimension == 3:
id1, id2, id3 = self.connectivity
elif self.dimension == 4:
id1, id2, id3, id4 = self.connectivity
else:
raise Exception ("CIMPY:: Error Telement dimension" +str(self.dimension) + ": an element's dimension should be either 3 (triangle in 2D) or 4")
nodes_dic = input_mesh.nodes_dict
self.node1 = nodes_dic[id1]
self.node2 = nodes_dic[id2]
self.node3 = nodes_dic[id3]
if self.dimension == 4: self.node4 = nodes_dic[id4]
def GetConnectivityString(self, dimension):
if dimension == 2:
return str(self.connectivity[0]) + str("\t") + str(self.connectivity[1]) + str("\t") + str(self.connectivity[2]) + str("\n")
elif dimension == 3:
return str(self.connectivity[0]) + str("\t") + str(self.connectivity[1]) + str("\t") + str(self.connectivity[2]) + str("\t") + str(self.connectivity[3]) + str("\n")
#==================================================
def view(filename, engine= "mpl"):
""" a cimpy function to visualize a mesh using cimpy's Tmesh class """
""" argument: filename of the mesh
"""
mesh = Tmesh(filename)
mesh.View(engine)
#==================================================
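# Minimal usage sketch (illustrative only: "square.t" is a hypothetical mesh file
# in the .t format expected by Tmesh; keyword arguments are standard matplotlib ones):
#
#   mesh = Tmesh("square.t")
#   print "nodes:", mesh.GetNumberOfPoints(), "elements:", mesh.GetNumberOfElements()
#   mesh.View(engine="mpl", marker="o", color="k")   # matplotlib scatter/triplot backend
#   mesh.View(engine="myv")                          # Mayavi rendering via tvtk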
| gpl-2.0 |
kevin-intel/scikit-learn | examples/ensemble/plot_gradient_boosting_categorical.py | 6 | 9151 | """
================================================
Categorical Feature Support in Gradient Boosting
================================================
.. currentmodule:: sklearn
In this example, we will compare the training times and prediction
performances of :class:`~ensemble.HistGradientBoostingRegressor` with
different encoding strategies for categorical features. In
particular, we will evaluate:
- dropping the categorical features
- using a :class:`~preprocessing.OneHotEncoder`
- using an :class:`~preprocessing.OrdinalEncoder` and treat categories as
ordered, equidistant quantities
- using an :class:`~preprocessing.OrdinalEncoder` and rely on the :ref:`native
category support <categorical_support_gbdt>` of the
:class:`~ensemble.HistGradientBoostingRegressor` estimator.
We will work with the Ames Iowa Housing dataset, which consists of numerical
and categorical features, where the houses' sale prices are the target.
"""
print(__doc__)
# %%
# Load Ames Housing dataset
# -------------------------
# First, we load the ames housing data as a pandas dataframe. The features
# are either categorical or numerical:
from sklearn.datasets import fetch_openml
X, y = fetch_openml(data_id=41211, as_frame=True, return_X_y=True)
n_categorical_features = (X.dtypes == 'category').sum()
n_numerical_features = (X.dtypes == 'float').sum()
print(f"Number of samples: {X.shape[0]}")
print(f"Number of features: {X.shape[1]}")
print(f"Number of categorical features: {n_categorical_features}")
print(f"Number of numerical features: {n_numerical_features}")
# %%
# Gradient boosting estimator with dropped categorical features
# -------------------------------------------------------------
# As a baseline, we create an estimator where the categorical features are
# dropped:
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
dropper = make_column_transformer(
('drop', make_column_selector(dtype_include='category')),
remainder='passthrough')
hist_dropped = make_pipeline(dropper,
HistGradientBoostingRegressor(random_state=42))
# %%
# Gradient boosting estimator with one-hot encoding
# -------------------------------------------------
# Next, we create a pipeline that will one-hot encode the categorical features
# and let the rest of the numerical data to passthrough:
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = make_column_transformer(
(OneHotEncoder(sparse=False, handle_unknown='ignore'),
make_column_selector(dtype_include='category')),
remainder='passthrough')
hist_one_hot = make_pipeline(one_hot_encoder,
HistGradientBoostingRegressor(random_state=42))
# %%
# Gradient boosting estimator with ordinal encoding
# -------------------------------------------------
# Next, we create a pipeline that will treat categorical features as if they
# were ordered quantities, i.e. the categories will be encoded as 0, 1, 2,
# etc., and treated as continuous features.
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
ordinal_encoder = make_column_transformer(
(OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=np.nan),
make_column_selector(dtype_include='category')),
remainder='passthrough')
hist_ordinal = make_pipeline(ordinal_encoder,
HistGradientBoostingRegressor(random_state=42))
# %%
# Gradient boosting estimator with native categorical support
# -----------------------------------------------------------
# We now create a :class:`~ensemble.HistGradientBoostingRegressor` estimator
# that will natively handle categorical features. This estimator will not treat
# categorical features as ordered quantities.
#
# Since the :class:`~ensemble.HistGradientBoostingRegressor` requires category
# values to be encoded in `[0, n_unique_categories - 1]`, we still rely on an
# :class:`~preprocessing.OrdinalEncoder` to pre-process the data.
#
# The main difference between this pipeline and the previous one is that in
# this one, we let the :class:`~ensemble.HistGradientBoostingRegressor` know
# which features are categorical.
# The ordinal encoder will first output the categorical features, and then the
# continuous (passed-through) features
categorical_mask = ([True] * n_categorical_features +
[False] * n_numerical_features)
hist_native = make_pipeline(
ordinal_encoder,
HistGradientBoostingRegressor(random_state=42,
categorical_features=categorical_mask)
)
# %%
# Model comparison
# ----------------
# Finally, we evaluate the models using cross validation. Here we compare the
# models performance in terms of
# :func:`~metrics.mean_absolute_percentage_error` and fit times.
from sklearn.model_selection import cross_validate
import matplotlib.pyplot as plt
scoring = "neg_mean_absolute_percentage_error"
dropped_result = cross_validate(hist_dropped, X, y, cv=3, scoring=scoring)
one_hot_result = cross_validate(hist_one_hot, X, y, cv=3, scoring=scoring)
ordinal_result = cross_validate(hist_ordinal, X, y, cv=3, scoring=scoring)
native_result = cross_validate(hist_native, X, y, cv=3, scoring=scoring)
def plot_results(figure_title):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
plot_info = [('fit_time', 'Fit times (s)', ax1, None),
('test_score', 'Mean Absolute Percentage Error', ax2,
(0, 0.20))]
x, width = np.arange(4), 0.9
for key, title, ax, y_limit in plot_info:
items = [dropped_result[key], one_hot_result[key], ordinal_result[key],
native_result[key]]
ax.bar(x, [np.mean(np.abs(item)) for item in items],
width, yerr=[np.std(item) for item in items],
color=['C0', 'C1', 'C2', 'C3'])
ax.set(xlabel='Model', title=title, xticks=x,
xticklabels=["Dropped", "One Hot", "Ordinal", "Native"],
ylim=y_limit)
fig.suptitle(figure_title)
plot_results("Gradient Boosting on Adult Census")
# %%
# We see that the model with one-hot-encoded data is by far the slowest. This
# is to be expected, since one-hot-encoding creates one additional feature per
# category value (for each categorical feature), and thus more split points
# need to be considered during fitting. In theory, we expect the native
# handling of categorical features to be slightly slower than treating
# categories as ordered quantities ('Ordinal'), since native handling requires
# :ref:`sorting categories <categorical_support_gbdt>`. Fitting times should
# however be close when the number of categories is small, and this may not
# always be reflected in practice.
#
# In terms of prediction performance, dropping the categorical features leads
# to poorer performance. The three models that use categorical features have
# comparable error rates, with a slight edge for the native handling.
# %%
# Limiting the number of splits
# ------------------------------
#
# In general, one can expect poorer predictions from one-hot-encoded data,
# especially when the tree depths or the number of nodes are limited: with
# one-hot-encoded data, one needs more split points, i.e. more depth, in order
# to recover an equivalent split that could be obtained in one single split
# point with native handling.
#
# This is also true when categories are treated as ordinal quantities: if
# categories are `A..F` and the best split is `ACF - BDE` the one-hot-encoder
# model will need 3 split points (one per category in the left node), and the
# ordinal non-native model will need 4 splits: 1 split to isolate `A`, 1 split
# to isolate `F`, and 2 splits to isolate `C` from `BCDE`.
#
# How strongly the models' performances differ in practice will depend on the
# dataset and on the flexibility of the trees.
#
# To see this, let us re-run the same analysis with under-fitting models where
# we artificially limit the total number of splits by both limiting the number
# of trees and the depth of each tree.
for pipe in (hist_dropped, hist_one_hot, hist_ordinal, hist_native):
pipe.set_params(histgradientboostingregressor__max_depth=3,
histgradientboostingregressor__max_iter=15)
dropped_result = cross_validate(hist_dropped, X, y, cv=3, scoring=scoring)
one_hot_result = cross_validate(hist_one_hot, X, y, cv=3, scoring=scoring)
ordinal_result = cross_validate(hist_ordinal, X, y, cv=3, scoring=scoring)
native_result = cross_validate(hist_native, X, y, cv=3, scoring=scoring)
plot_results("Gradient Boosting on Adult Census (few and small trees)")
plt.show()
# %%
# The results for these under-fitting models confirm our previous intuition:
# the native category handling strategy performs the best when the splitting
# budget is constrained. The two other strategies (one-hot encoding and
# treating categories as ordinal values) lead to error values comparable
# to the baseline model that just dropped the categorical features altogether.
| bsd-3-clause |
schoolie/bokeh | bokeh/sampledata/daylight.py | 13 | 2683 | """ Daylight hours from http://www.sunrisesunset.com
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'daylight sample data requires Pandas (http://pandas.pydata.org) to be installed')
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/io/excel.py | 4 | 49188 | """
Module parse to/from Excel
"""
#----------------------------------------------------------------------
# ExcelFile class
import os
import datetime
import abc
import numpy as np
from pandas.io.parsers import TextParser
from pandas.io.common import _is_url, _urlopen
from pandas.tseries.period import Period
from pandas import json
from pandas.compat import map, zip, reduce, range, lrange, u, add_metaclass
from pandas.core import config
from pandas.core.common import pprint_thing
import pandas.compat as compat
import pandas.compat.openpyxl_compat as openpyxl_compat
import pandas.core.common as com
from warnings import warn
from distutils.version import LooseVersion
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.%s.writer" % ext,
engine_name, validator=str)
_writer_extensions.append(ext)
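# Usage sketch for register_writer (illustrative only -- a real engine must
# subclass ExcelWriter and implement its abstract interface, e.g. write_cells
# and save; the class name and engine string below are hypothetical):
#
#   class MyWriter(ExcelWriter):
#       engine = 'mywriter'
#       supported_extensions = ('.xlsx',)
#       ...
#   register_writer(MyWriter)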
def get_writer(engine_name):
if engine_name == 'openpyxl':
try:
import openpyxl
# with version-less openpyxl engine
# make sure we make the intelligent choice for the user
if LooseVersion(openpyxl.__version__) < '2.0.0':
return _writers['openpyxl1']
else:
return _writers['openpyxl2']
except ImportError:
# fall through to normal exception handling below
pass
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
def read_excel(io, sheetname=0, **kwds):
"""Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, file-like object, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed sheet
positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
has_index_names : boolean, default False
True if the cols defined in index_col have an index name and are
not in the header. Index name will be placed on a separate line below
the header.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname argument
for more information on when a Dict of Dataframes is returned.
"""
if 'kind' in kwds:
kwds.pop('kind')
warn("kind keyword is no longer supported in read_excel and may be "
"removed in a future version", FutureWarning)
engine = kwds.pop('engine', None)
return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
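# Usage sketch (hypothetical file and sheet names):
#
#   df = read_excel('workbook.xlsx', sheetname='Sheet1', index_col=0)
#   all_sheets = read_excel('workbook.xlsx', sheetname=None)  # dict of DataFrames, one per sheet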
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See ExcelFile.parse for more documentation
Parameters
----------
io : string, file-like object or xlrd workbook
If a string, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError("pandas requires xlrd >= 0.9.0 for excel "
"support, current version " + xlrd.__VERSION__)
self.io = io
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
if isinstance(io, compat.string_types):
if _is_url(io):
data = _urlopen(io).read()
self.book = xlrd.open_workbook(file_contents=data)
else:
self.book = xlrd.open_workbook(io)
elif engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
convert_float=True, has_index_names=False, converters=None, **kwds):
"""Read an Excel table into DataFrame
Parameters
----------
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed sheet
positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
* If None then parse all columns
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be
parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
parse_dates : boolean, default False
Parse date Excel values.
date_parser : function default None
Date parsing function
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
thousands : str, default None
Thousands separator
chunksize : int, default None
Size of file chunk to read for lazy evaluation.
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all
numeric data will be read in as floats: Excel stores all numbers as
floats internally.
has_index_names : boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
verbose : boolean, default False
Set to True to print a single statement when reading each
excel sheet.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname argument
for more information on when a Dict of DataFrames is returned.
"""
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
skip_footer = skipfooter
return self._parse_excel(sheetname=sheetname, header=header,
skiprows=skiprows,
index_col=index_col,
has_index_names=has_index_names,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values,
thousands=thousands, chunksize=chunksize,
skip_footer=skip_footer,
convert_float=convert_float,
converters=converters,
**kwds)
def _should_parse(self, i, parse_cols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(parse_cols, int):
return i <= parse_cols
elif isinstance(parse_cols, compat.string_types):
return i in _range2cols(parse_cols)
else:
return i in parse_cols
def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, has_index_names=None, parse_cols=None,
parse_dates=False, date_parser=None, na_values=None,
thousands=None, chunksize=None, convert_float=True,
verbose=False, **kwds):
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents,cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents,
epoch1904)
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31))
or (epoch1904 and year == (1904, 1, 1))):
cell_contents = datetime.time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < datetime.MINYEAR:
cell_contents = datetime.time(*dt[3:])
else:
cell_contents = datetime.datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# Handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value,typ))
data.append(row)
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
chunksize=chunksize,
**kwds)
output[asheetname] = parser.read()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
val = int(val)
elif com.is_float(val):
val = float(val)
elif com.is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
return val
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
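# A hedged sketch of the minimal surface described above (the class and engine
# names below are made up for illustration); a real implementation would also
# be registered with register_writer(_DemoWriter) so the engine can be looked
# up by name.
#
#   class _DemoWriter(ExcelWriter):
#       engine = 'demo'
#       supported_extensions = ('.xlsx',)
#       def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
#           ...  # write each generated cell into the backing workbook
#       def save(self):
#           ...  # flush the workbook to self.path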
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if cls == ExcelWriter:
if engine is None:
ext = os.path.splitext(path)[-1][1:]
try:
engine = config.get_option('io.excel.%s.writer' % ext)
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
"""
Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cells of formatted data to save to the Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# validate that this engine can handle the extension
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '%s': '%s'") %
(pprint_thing(cls.engine), pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
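# Usage sketch (the file name and DataFrame `df` are illustrative, not part of
# this module): because __enter__/__exit__ are defined above, a writer can be
# used as a context manager and the workbook is saved on exit.
#
#   with ExcelWriter('report.xlsx') as writer:
#       df.to_excel(writer, sheet_name='Sheet1')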
class _Openpyxl1Writer(ExcelWriter):
engine = 'openpyxl1'
supported_extensions = ('.xlsx', '.xlsm')
openpyxl_majorver = 1
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
'time. Use {0}.x.y.'
.format(self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs)
# Create workbook object with default optimized_write=True.
self.book = Workbook()
# Openpyxl 1.6.1 adds a dummy sheet. We remove it.
if self.book.worksheets:
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style = None
if cell.style:
style = self._convert_to_style(cell.style)
for field in style.__fields__:
xcell.style.__setattr__(field,
style.__getattribute__(field))
if isinstance(cell.val, datetime.datetime):
xcell.style.number_format.format_code = self.datetime_format
elif isinstance(cell.val, datetime.date):
xcell.style.number_format.format_code = self.date_format
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
register_writer(_Openpyxl1Writer)
class _OpenpyxlWriter(_Openpyxl1Writer):
engine = 'openpyxl'
register_writer(_OpenpyxlWriter)
class _Openpyxl2Writer(_Openpyxl1Writer):
"""
Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl2'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style_kwargs = {}
# Apply format codes before cell.style to allow override
if isinstance(cell.val, datetime.datetime):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.datetime_format}}))
elif isinstance(cell.val, datetime.date):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.date_format}}))
if cell.style:
style_kwargs.update(self._convert_to_style_kwargs(cell.style))
if style_kwargs:
xcell.style = xcell.style.copy(**style_kwargs)
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
xcell.style = xcell.style.copy(**style_kwargs)
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
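# Illustrative input/output (the values are made up): a style_dict such as
#   {'font': {'bold': True}, 'borders': {'top': 'thin'}}
# is normalized to
#   {'font': Font(bold=True), 'border': Border(top=Side(style='thin'))},
# ready to be passed to xcell.style.copy(**style_kwargs) in write_cells above.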
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
try:
# >= 2.0.0 < 2.1.0
from openpyxl.styles import NumberFormat
return NumberFormat(**number_format_dict)
except:
# >= 2.1.0
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
protection : openpyxl.styles.Protection
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
register_writer(_Openpyxl2Writer)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (line_sep).join(it)
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (field_sep).join(it)
return out
else:
item = "%s" % item
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
cell.val, style)
def _convert_to_style(self, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# If there is no formatting we don't create a format object.
if num_format_str is None and style_dict is None:
return None
# Create a XlsxWriter format object.
xl_format = self.book.add_format()
if num_format_str is not None:
xl_format.set_num_format(num_format_str)
if style_dict is None:
return xl_format
# Map the cell font to XlsxWriter font properties.
if style_dict.get('font'):
font = style_dict['font']
if font.get('bold'):
xl_format.set_bold()
# Map the alignment to XlsxWriter alignment properties.
alignment = style_dict.get('alignment')
if alignment:
if (alignment.get('horizontal')
and alignment['horizontal'] == 'center'):
xl_format.set_align('center')
if (alignment.get('vertical')
and alignment['vertical'] == 'top'):
xl_format.set_align('top')
# Map the cell borders to XlsxWriter border properties.
if style_dict.get('borders'):
xl_format.set_border()
return xl_format
register_writer(_XlsxWriter)
| mit |
tapomayukh/projects_in_python | rapid_categorization/haptic_map/NIDRR/Demo_April_2014/hmm_taxel_based_nidrr.py | 1 | 14180 | #!/usr/bin/env python
# Online haptic_map implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import copy
import pickle
import optparse
import unittest
import ghmm
import ghmmwrapper
import random
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray as TaxelArray_Meka
#from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
class HMM_Model:
def __init__(self, Fmat, human_Trials, furniture_Trials, num_features, num_states):
self.F = ghmm.Float() # emission domain of HMM model
self.Fmat = Fmat
self.human_Trials = human_Trials
self.furniture_Trials = furniture_Trials
self.num_features = num_features
self.number_states = num_states
# Getting mean / covariance
def mean_cov(self, start_Trials, end_Trials):
# start_Trials = 0 for human, human_Trials for furniture
# end_Trials = human_Trials for human, human_Trials + furniture_Trials for furniture
# Params
mu = {}
feature_final_data = {}
state = {}
Feature = {}
for i in range(self.num_features):
mu[i] = np.zeros((self.number_states,1))
feature_final_data[i] = [0.0]*self.number_states
state[i] = [0.0]
Feature[i] = []
i = start_Trials
while (i < end_Trials):
data_length = len(self.Fmat[i])
feature_length = data_length/self.num_features
sample_length = feature_length/self.number_states
for k in range(self.num_features):
Feature[k] = self.Fmat[i][feature_length*k:feature_length*(k+1)]
if i == start_Trials:
for k in range(self.num_features):
j = 0
while (j < self.number_states):
feature_final_data[k][j] = Feature[k][sample_length*j:sample_length*(j+1)]
j=j+1
else:
for k in range(self.num_features):
j = 0
while (j < self.number_states):
state[k] = Feature[k][sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_final_data[k][j] = feature_final_data[k][j]+state[k]
j=j+1
i = i+1
if self.num_features == 1:
cov = np.zeros((self.number_states,1))
else:
cov = np.zeros((self.number_states,self.num_features,self.num_features))
for k in range(self.num_features):
j = 0
while (j < self.number_states):
mu[k][j] = np.mean(feature_final_data[k][j])
j = j+1
if self.num_features == 1:
j = 0
while (j < self.number_states):
cov[j] = scp.std(feature_final_data[0][j])
j = j+1
elif self.num_features == 2:
j = 0
while (j < self.number_states):
cov[j] = np.cov(np.array([sum(feature_final_data[0][j],[]),sum(feature_final_data[1][j],[])]))
j = j+1
elif self.num_features == 3:
j = 0
while (j < self.number_states):
cov[j] = np.cov(np.array([sum(feature_final_data[0][j],[]),sum(feature_final_data[1][j],[]),sum(feature_final_data[2][j],[])]))
j = j+1
return mu, cov
def calculate_A_B_pi(self, flag):
# A - Transition Matrix
if self.number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif self.number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif self.number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif self.number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif self.number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B = [0.0]*self.number_states
if flag == 'human':
mu, cov = self.mean_cov(0, self.human_Trials)
elif flag == 'furniture':
mu, cov = self.mean_cov(self.human_Trials, (self.human_Trials + self.furniture_Trials))
if self.num_features == 1:
for num_states in range(self.number_states):
#B[num_states] = [mu[0][num_states][0],cov[num_states][0]]
B[num_states] = [mu[0][num_states][0],0.01]
elif self.num_features == 2:
for num_states in range(self.number_states):
B[num_states] = [[mu[0][num_states][0],mu[1][num_states][0]],[cov[num_states][0][0], cov[num_states][0][1], cov[num_states][1][0], cov[num_states][1][1]]]
elif self.num_features == 3:
for num_states in range(self.number_states):
B[num_states] = [[mu[0][num_states][0],mu[1][num_states][0],mu[2][num_states][0]],[cov[num_states][0][0], cov[num_states][0][1], cov[num_states][0][2], cov[num_states][1][0], cov[num_states][1][1], cov[num_states][1][2], cov[num_states][2][0], cov[num_states][2][1], cov[num_states][2][2]]]
# pi - initial probabilities per state
if self.number_states == 3:
pi = [1./3.] * 3
elif self.number_states == 5:
pi = [0.2] * 5
elif self.number_states == 10:
pi = [0.1] * 10
elif self.number_states == 15:
pi = [1./15.] * 15
elif self.number_states == 20:
pi = [0.05] * 20
#print B
return A, B, pi
def create_model(self, flag):
A, B, pi = self.calculate_A_B_pi(flag)
# generate models from parameters
if self.num_features == 1:
model = ghmm.HMMFromMatrices(self.F,ghmm.GaussianDistribution(self.F), A, B, pi) # Will be Trained
else:
model = ghmm.HMMFromMatrices(self.F,ghmm.MultivariateGaussianDistribution(self.F), A, B, pi) # Will be Trained
#print A, B, pi
#print "Model Created for:", flag
#raw_input('Press any key when ready')
return model
def train(self, model, flag, sleeve):
total_seq_old = []
total_seq = []
total_seq_old = copy.deepcopy(self.Fmat)
for i in range((self.human_Trials + self.furniture_Trials)):
total_seq_old[i][:] = sum(total_seq_old[i][:],[])
#print total_seq_old[i]
total_seq = copy.deepcopy(total_seq_old)
for i in range((self.human_Trials + self.furniture_Trials)):
seq_length = len(total_seq_old[i])
if self.num_features == 2:
j = 0
while j < seq_length/2:
total_seq[i][j] = total_seq_old[i][j]
total_seq[i][j+1] = total_seq_old[i][j+seq_length/2]
j=j+2
elif self.num_features == 3:
j = 0
while j < seq_length/3:
total_seq[i][j] = total_seq_old[i][j]
total_seq[i][j+1] = total_seq_old[i][j+seq_length/3]
total_seq[i][j+2] = total_seq_old[i][j+2*seq_length/3]
j=j+3
if flag == 'human':
start_Trials = 0
end_Trials = self.human_Trials
elif flag == 'furniture':
start_Trials = self.human_Trials
end_Trials = self.human_Trials + self.furniture_Trials
train_seq = total_seq[start_Trials:end_Trials]
#print len(train_seq)
#print "Training the HMM Model..."
final_ts = ghmm.SequenceSet(self.F,train_seq)
#print final_ts
model.baumWelch(final_ts)
#print "Model Trained: Ready to Collect Data !"
#print "Model Trained for:", flag
#print flag, "Model Trained for", sleeve, ": Ready to Collect Data !"
return model
def test(self, model, ts_obj):
# Find Viterbi Path
final_ts_obj = ghmm.EmissionSequence(self.F,ts_obj)
path_obj = model.viterbi(final_ts_obj)
return path_obj
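# A rough usage sketch (the variable names, trial counts and sleeve label are
# assumptions, not part of this file): Fmat holds per-trial feature sequences,
# human trials first, furniture trials after.
#
#   hmm = HMM_Model(Fmat, human_Trials=10, furniture_Trials=10,
#                   num_features=1, num_states=10)
#   model_human = hmm.train(hmm.create_model('human'), 'human', sleeve='rigid')
#   path = hmm.test(model_human, ts_obj)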
######################################################################################################
| mit |
dch312/numpy | numpy/lib/twodim_base.py | 2 | 26775 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
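# Fill the k-th diagonal through the flattened array: starting offset i
# (column k for k >= 0, row -k for k < 0) with stride M+1, i.e. one step
# down and one step right per element.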
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
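# Build the lower-triangle mask with a broadcasted >= between row indices and
# k-shifted column indices, using the smallest integer dtype that can hold
# each index range.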
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
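# Build the columns as a cumulative product of x: column 0 of `tmp` is all
# ones and column j is x**j; the reversed view writes the default
# decreasing-power layout into `v` without an extra copy.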
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
...     extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
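# If bins is a single sequence of edges (length other than 1 or 2), share it
# between both dimensions before delegating to histogramdd.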
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
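# --- Illustrative example (added; not part of the original numpy source) ---
# triu_indices_from has no Examples section above, so this sketches the call
# pattern; the array contents below are hypothetical.
#
#   >>> a = np.arange(12).reshape(3, 4)
#   >>> iu = np.triu_indices_from(a, k=1)
#   >>> a[iu]          # strictly upper-triangular elements of the 3x4 array
#   array([ 1,  2,  3,  6,  7, 11])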
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexes/timedeltas/test_setops.py | 15 | 2556 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, Int64Index
class TestTimedeltaIndex(object):
_multiprocess_can_split_ = True
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
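        # Illustrative note (added; not part of the original test file): for
        # pandas Index objects the `&` operator used above is shorthand for
        # .intersection(), so an equivalent spelling of the final check is:
        #
        #   result = index_1.intersection(index_2)
        #   tm.assert_index_equal(result, expected)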
| apache-2.0 |
gfyoung/pandas | pandas/tests/dtypes/test_concat.py | 3 | 3254 | import pytest
import pandas.core.dtypes.concat as _concat
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, TimedeltaIndex
import pandas._testing as tm
@pytest.mark.parametrize(
"to_concat, expected",
[
# int/float/str
([["a"], [1, 2]], ["i", "object"]),
([[3, 4], [1, 2]], ["i"]),
([[3, 4], [1, 2.1]], ["i", "f"]),
# datetimelike
([DatetimeIndex(["2011-01-01"]), DatetimeIndex(["2011-01-02"])], ["datetime"]),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 days"])], ["timedelta"]),
# datetimelike object
(
[
DatetimeIndex(["2011-01-01"]),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime", "datetime64[ns, US/Eastern]"],
),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime64[ns, Asia/Tokyo]", "datetime64[ns, US/Eastern]"],
),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 hours"])], ["timedelta"]),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
TimedeltaIndex(["1 days"]),
],
["datetime64[ns, Asia/Tokyo]", "timedelta"],
),
],
)
def test_get_dtype_kinds(index_or_series, to_concat, expected):
to_concat_klass = [index_or_series(c) for c in to_concat]
result = _concat._get_dtype_kinds(to_concat_klass)
assert result == set(expected)
@pytest.mark.parametrize(
"to_concat, expected",
[
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="M")],
["period[M]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="M")]),
],
["period[M]"],
),
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="D")],
["period[M]", "period[D]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="D")]),
],
["period[M]", "period[D]"],
),
],
)
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat._get_dtype_kinds(to_concat)
assert result == set(expected)
def test_concat_mismatched_categoricals_with_empty():
# concat_compat behavior on series._values should match pd.concat on series
ser1 = Series(["a", "b", "c"], dtype="category")
ser2 = Series([], dtype="category")
result = _concat.concat_compat([ser1._values, ser2._values])
expected = pd.concat([ser1, ser2])._values
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("copy", [True, False])
def test_concat_single_dataframe_tz_aware(copy):
# https://github.com/pandas-dev/pandas/issues/25257
df = pd.DataFrame(
{"timestamp": [pd.Timestamp("2020-04-08 09:00:00.709949+0000", tz="UTC")]}
)
expected = df.copy()
result = pd.concat([df], copy=copy)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
h2oai/h2o-dev | h2o-py/h2o/estimators/pca.py | 2 | 9737 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OPrincipalComponentAnalysisEstimator(H2OEstimator):
"""
Principal Components Analysis
"""
algo = "pca"
def __init__(self, **kwargs):
super(H2OPrincipalComponentAnalysisEstimator, self).__init__()
self._parms = {}
names_list = {"model_id", "training_frame", "validation_frame", "ignored_columns", "ignore_const_cols",
"score_each_iteration", "transform", "pca_method", "pca_impl", "k", "max_iterations",
"use_all_factor_levels", "compute_metrics", "impute_missing", "seed", "max_runtime_secs",
"export_checkpoints_dir"}
if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in names_list:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
assert_is_type(training_frame, None, H2OFrame)
self._parms["training_frame"] = training_frame
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
assert_is_type(validation_frame, None, H2OFrame)
self._parms["validation_frame"] = validation_frame
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def transform(self):
"""
Transformation of training data
One of: ``"none"``, ``"standardize"``, ``"normalize"``, ``"demean"``, ``"descale"`` (default: ``"none"``).
"""
return self._parms.get("transform")
@transform.setter
def transform(self, transform):
assert_is_type(transform, None, Enum("none", "standardize", "normalize", "demean", "descale"))
self._parms["transform"] = transform
@property
def pca_method(self):
"""
Specify the algorithm to use for computing the principal components: GramSVD - uses a distributed computation of
the Gram matrix, followed by a local SVD; Power - computes the SVD using the power iteration method
(experimental); Randomized - uses randomized subspace iteration method; GLRM - fits a generalized low-rank model
with L2 loss function and no regularization and solves for the SVD using local matrix algebra (experimental)
One of: ``"gram_s_v_d"``, ``"power"``, ``"randomized"``, ``"glrm"`` (default: ``"gram_s_v_d"``).
"""
return self._parms.get("pca_method")
@pca_method.setter
def pca_method(self, pca_method):
assert_is_type(pca_method, None, Enum("gram_s_v_d", "power", "randomized", "glrm"))
self._parms["pca_method"] = pca_method
@property
def pca_impl(self):
"""
Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX - eigenvalue
decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue decompositions for symmetric matrix
using MTJ; MTJ_SVD_DENSEMATRIX - singular-value decompositions for dense matrix using MTJ; JAMA - eigenvalue
decompositions for dense matrix using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
https://github.com/fommil/matrix-toolkits-java/
One of: ``"mtj_evd_densematrix"``, ``"mtj_evd_symmmatrix"``, ``"mtj_svd_densematrix"``, ``"jama"``.
"""
return self._parms.get("pca_impl")
@pca_impl.setter
def pca_impl(self, pca_impl):
assert_is_type(pca_impl, None, Enum("mtj_evd_densematrix", "mtj_evd_symmmatrix", "mtj_svd_densematrix", "jama"))
self._parms["pca_impl"] = pca_impl
@property
def k(self):
"""
Rank of matrix approximation
Type: ``int`` (default: ``1``).
"""
return self._parms.get("k")
@k.setter
def k(self, k):
assert_is_type(k, None, int)
self._parms["k"] = k
@property
def max_iterations(self):
"""
Maximum training iterations
Type: ``int`` (default: ``1000``).
"""
return self._parms.get("max_iterations")
@max_iterations.setter
def max_iterations(self, max_iterations):
assert_is_type(max_iterations, None, int)
self._parms["max_iterations"] = max_iterations
@property
def use_all_factor_levels(self):
"""
Whether first factor level is included in each categorical expansion
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("use_all_factor_levels")
@use_all_factor_levels.setter
def use_all_factor_levels(self, use_all_factor_levels):
assert_is_type(use_all_factor_levels, None, bool)
self._parms["use_all_factor_levels"] = use_all_factor_levels
@property
def compute_metrics(self):
"""
Whether to compute metrics on the training data
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("compute_metrics")
@compute_metrics.setter
def compute_metrics(self, compute_metrics):
assert_is_type(compute_metrics, None, bool)
self._parms["compute_metrics"] = compute_metrics
@property
def impute_missing(self):
"""
Whether to impute missing entries with the column mean
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("impute_missing")
@impute_missing.setter
def impute_missing(self, impute_missing):
assert_is_type(impute_missing, None, bool)
self._parms["impute_missing"] = impute_missing
@property
def seed(self):
"""
RNG seed for initialization
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
def init_for_pipeline(self):
"""
Returns H2OPCA object which implements fit and transform method to be used in sklearn.Pipeline properly.
All parameters defined in self.__params, should be input parameters in H2OPCA.__init__ method.
:returns: H2OPCA object
"""
import inspect
from h2o.transforms.decomposition import H2OPCA
# check which parameters can be passed to H2OPCA init
var_names = list(dict(inspect.getmembers(H2OPCA.__init__.__code__))['co_varnames'])
parameters = {k: v for k, v in self._parms.items() if k in var_names}
return H2OPCA(**parameters)
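# --- Usage sketch (added for illustration; not part of the auto-generated file) ---
# Assumes a running H2O cluster and an already-imported H2OFrame; the file path
# and parameter choices below are hypothetical.
#
#   import h2o
#   from h2o.estimators import H2OPrincipalComponentAnalysisEstimator
#
#   h2o.init()
#   train = h2o.import_file("train.csv")
#   pca = H2OPrincipalComponentAnalysisEstimator(k=3, transform="standardize",
#                                                impute_missing=True)
#   pca.train(training_frame=train)
#   components = pca.predict(train)   # rows projected onto the k components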
| apache-2.0 |
jtorrents/networkx | examples/multigraph/chess_masters.py | 11 | 5140 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class.
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, **game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=nx.connected_component_subgraphs(G.to_undirected())
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
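# Usage sketch (added; mirrors what the __main__ block above does, shown
# compactly; the edge unpacked below is whichever game happens to come first):
#
#   G = chess_pgn_graph()            # uses the bundled default PGN file
#   white, black, info = list(G.edges(data=True))[0]
#   print("%s vs %s: %s" % (white, black, info.get('Result')))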
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/animation/old_animation/animation_blit_gtk.py | 3 | 1567 | #!/usr/bin/env python
from __future__ import print_function
# For detailed comments on animation and the techniques used here, see
# the wiki entry
# http://www.scipy.org/wikis/topical_software/MatplotlibAnimation
import time
import gtk, gobject
import matplotlib
matplotlib.use('GTKAgg')
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
canvas = fig.canvas
fig.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs
ax.grid() # to ensure proper background restore
# create the initial line
x = np.arange(0,2*np.pi,0.01)
line, = ax.plot(x, np.sin(x), animated=True, lw=2)
canvas.draw()
# for profiling
tstart = time.time()
def update_line(*args):
print('you are here', update_line.cnt)
if update_line.background is None:
update_line.background = canvas.copy_from_bbox(ax.bbox)
# restore the clean slate background
canvas.restore_region(update_line.background)
# update the data
line.set_ydata(np.sin(x+update_line.cnt/10.0))
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
canvas.blit(ax.bbox)
if update_line.cnt==1000:
# print the timing info and quit
print('FPS:' , 1000/(time.time()-tstart))
gtk.mainquit()
raise SystemExit
update_line.cnt += 1
return True
update_line.cnt = 0
update_line.background = None
def start_anim(event):
gobject.idle_add(update_line)
canvas.mpl_disconnect(start_anim.cid)
start_anim.cid = canvas.mpl_connect('draw_event', start_anim)
plt.show()
| unlicense |
cbmoore/statsmodels | statsmodels/datasets/macrodata/data.py | 25 | 3184 | """United States Macroeconomic data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St.
Louis [1] except the unemployment rate which was taken from the National
Bureau of Labor Statistics [2]. ::
[1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of
St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15,
2009.
[2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor;
http://www.bls.gov/data/; accessed December 15, 2009.
"""
DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 203
Number of Variables - 14
Variable name definitions::
year - 1959q1 - 2009q3
quarter - 1-4
realgdp - Real gross domestic product (Bil. of chained 2005 US$,
seasonally adjusted annual rate)
realcons - Real personal consumption expenditures (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realinv - Real gross private domestic investment (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realgovt - Real federal consumption expenditures & gross investment
(Bil. of chained 2005 US$, seasonally adjusted annual rate)
realdpi - Real private disposable income (Bil. of chained 2005
US$, seasonally adjusted annual rate)
cpi - End of the quarter consumer price index for all urban
consumers: all items (1982-84 = 100, seasonally adjusted).
m1 - End of the quarter M1 nominal money stock (Seasonally
adjusted)
tbilrate - Quarterly monthly average of the monthly 3-month
treasury bill: secondary market rate
unemp - Seasonally adjusted unemployment rate (%)
pop - End of the quarter total population: all ages incl. armed
forces over seas
infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)
realint - Real interest rate (tbilrate - infl)
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the US macro data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The macrodata Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/macrodata.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
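# Usage sketch (added; not part of the original dataset module). Column names
# follow the NOTE docstring above; e.g. infl is defined there as
# ln(cpi_t / cpi_{t-1}) * 400, so it can be recomputed from `cpi` directly:
#
#   import numpy as np
#   from statsmodels.datasets import macrodata
#
#   data = macrodata.load_pandas().data
#   infl = np.log(data['cpi'] / data['cpi'].shift(1)) * 400
#   realint = data['tbilrate'] - infl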
| bsd-3-clause |
robcarver17/pysystemtrade | systems/accounts/curves/account_curve_group.py | 1 | 5186 | from copy import copy
import pandas as pd
from syscore.dateutils import Frequency
from systems.accounts.curves.account_curve import accountCurve
from systems.accounts.curves.dict_of_account_curves import dictOfAccountCurves
from systems.accounts.pandl_calculators.pandl_generic_costs import GROSS_CURVE, NET_CURVE, COSTS_CURVE
from systems.accounts.curves.stats_dict import statsDict
class accountCurveGroup(accountCurve):
def __init__(self, dict_of_account_curves: dictOfAccountCurves,
capital,
**kwargs):
total_pandl_calculator = dict_of_account_curves.summed_pandl_calculator(capital=capital)
super().__init__(total_pandl_calculator,
**kwargs)
self._dict_of_account_curves = dict_of_account_curves
self._kwargs = _kwargs_with_defaults(kwargs)
@property
def asset_columns(self):
return list(self.dict_of_account_curves.keys())
def __getitem__(self, item):
kwargs = self.kwargs
return accountCurve(self.get_pandl_calculator_for_item(item),
**kwargs)
def get_pandl_calculator_for_item(self, item: str):
return self.dict_of_account_curves[item].pandl_calculator_with_costs
def to_frame(self) -> pd.DataFrame:
asset_columns = self.asset_columns
data_as_list = [self[asset_name] for asset_name in asset_columns]
data_as_pd = pd.concat(data_as_list, axis=1)
data_as_pd.columns = asset_columns
return data_as_pd
def get_stats(self, stat_name:str, curve_type:str = "net", freq:str = "daily") -> statsDict:
return statsDict(self, item=stat_name, freq=freq, curve_type=curve_type)
## TO RETURN A 'NEW' ACCOUNT CURVE GROUP
@property
def gross(self):
kwargs = self.kwargs_without_item('curve_type')
return accountCurveGroup(self.dict_of_account_curves,
capital = self.capital,
curve_type=GROSS_CURVE,
**kwargs)
@property
def net(self):
kwargs = self.kwargs_without_item('curve_type')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
curve_type=NET_CURVE,
**kwargs)
@property
def costs(self):
kwargs = self.kwargs_without_item('curve_type')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
curve_type=COSTS_CURVE,
**kwargs)
@property
def daily(self):
kwargs = self.kwargs_without_item('frequency')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
frequency=Frequency.BDay,
**kwargs)
@property
def weekly(self):
kwargs = self.kwargs_without_item('frequency')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
frequency=Frequency.Week,
**kwargs)
@property
def monthly(self):
kwargs = self.kwargs_without_item('frequency')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
frequency=Frequency.Month,
**kwargs)
@property
def annual(self):
kwargs = self.kwargs_without_item('frequency')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
frequency=Frequency.Year,
**kwargs)
@property
def percent(self):
kwargs = self.kwargs_without_item('is_percentage')
return accountCurveGroup(self.dict_of_account_curves,
capital=self.capital,
is_percentage=True,
**kwargs)
@property
def value_terms(self):
kwargs = self.kwargs_without_item('is_percentage')
return accountCurveGroup(self.dict_of_account_curves,
capital = self.capital,
is_percentage=False,
**kwargs)
@property
def dict_of_account_curves(self) -> dictOfAccountCurves:
return self._dict_of_account_curves
def kwargs_without_item(self, itemname):
kwargs = copy(self.kwargs)
kwargs.pop(itemname)
return kwargs
@property
def kwargs(self) -> dict:
return self._kwargs
def _kwargs_with_defaults(kwargs: dict) -> dict:
if 'frequency' not in kwargs:
kwargs['frequency'] = Frequency.BDay
if 'curve_type' not in kwargs:
kwargs['curve_type'] = NET_CURVE
if 'is_percentage' not in kwargs:
kwargs['is_percentage'] = False
if 'weighted' not in kwargs:
kwargs['weighted'] = False
return kwargs
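# Usage sketch (added for illustration; the capital figure and asset name are
# hypothetical, and "sharpe" assumes the underlying accountCurve exposes a
# sharpe statistic). Because gross/net/daily/percent each return a fresh
# accountCurveGroup, the accessors chain before indexing a single asset:
#
#   group = accountCurveGroup(dict_of_account_curves, capital=1000000)
#   sharpes = group.percent.daily.get_stats("sharpe", curve_type="net")
#   one_curve = group.net.weekly["US10"]   # accountCurve for a single asset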
| gpl-3.0 |
pombredanne/django-cachalot | benchmark.py | 3 | 10323 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals, print_function
from collections import OrderedDict
import io
import os
import platform
from random import choice
import re
import sqlite3
from subprocess import check_output
from time import time
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import django
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.cache import get_cache
from django.db import connections, connection
from django.test.utils import CaptureQueriesContext, override_settings
from django.utils.encoding import force_text
import matplotlib.pyplot as plt
import MySQLdb
import pandas as pd
import psycopg2
import cachalot
from cachalot.api import invalidate_all
from cachalot.tests.models import Test
RESULTS_PATH = 'benchmark/'
CONTEXTS = ('Control', 'Cold cache', 'Hot cache')
def write_conditions():
versions = OrderedDict()
# CPU
with open('/proc/cpuinfo') as f:
versions['CPU'] = re.search(r'^model name\s+: (.+)$', f.read(),
flags=re.MULTILINE).group(1)
# RAM
with open('/proc/meminfo') as f:
versions['RAM'] = re.search(r'^MemTotal:\s+(.+)$', f.read(),
flags=re.MULTILINE).group(1)
# OS
linux_dist = ' '.join(platform.linux_distribution()).strip()
if linux_dist:
versions['Linux distribution'] = linux_dist
else:
versions['OS'] = platform.system() + ' ' + platform.release()
versions.update((
('Python', platform.python_version()),
('Django', django.get_version()),
('cachalot', cachalot.version_string),
('sqlite', sqlite3.sqlite_version),
))
# PostgreSQL
cursor = connections['postgresql'].cursor()
cursor.execute('SELECT version();')
versions['PostgreSQL'] = re.match(r'^PostgreSQL ([\d\.]+) on .+$',
cursor.fetchone()[0]).group(1)
cursor.close()
# MySQL
cursor = connections['mysql'].cursor()
cursor.execute('SELECT version();')
versions['MySQL'] = cursor.fetchone()[0].split('-')[0]
cursor.close()
# Redis
out = force_text(check_output(['redis-cli',
'INFO', 'server'])).replace('\r', '')
versions['Redis'] = re.search(r'^redis_version:([\d\.]+)$', out,
flags=re.MULTILINE).group(1)
# memcached
out = force_text(check_output(['memcached', '-h']))
versions['memcached'] = re.match(r'^memcached ([\d\.]+)$', out,
flags=re.MULTILINE).group(1)
versions.update((
('psycopg2', psycopg2.__version__.split()[0]),
('MySQLdb', MySQLdb.__version__),
))
with io.open(os.path.join('benchmark', 'conditions.rst'), 'w') as f:
f.write('In this benchmark, a small database is generated, '
'and each test is executed %s times '
'under the following conditions:\n\n' % Benchmark.n)
def write_table_sep(char='='):
f.write(''.ljust(20, char) + ' ' + ''.ljust(50, char) + '\n')
write_table_sep()
for k, v in versions.items():
f.write(k.ljust(20) + ' ' + v + '\n')
write_table_sep()
class AssertNumQueries(CaptureQueriesContext):
def __init__(self, n, using=None):
self.n = n
self.using = using
super(AssertNumQueries, self).__init__(self.get_connection())
def get_connection(self):
if self.using is None:
return connection
return connections[self.using]
def __exit__(self, exc_type, exc_val, exc_tb):
super(AssertNumQueries, self).__exit__(exc_type, exc_val, exc_tb)
if len(self) != self.n:
print('The amount of queries should be %s, but %s were captured.'
% (self.n, len(self)))
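# Usage sketch (added; not part of the original script). AssertNumQueries is a
# CaptureQueriesContext that prints a notice instead of raising when the
# captured query count differs from the expected value:
#
#   with AssertNumQueries(1, using='postgresql'):
#       list(Test.objects.using('postgresql')[:10])   # should issue one query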
class Benchmark(object):
n = 20
def __init__(self):
self.data = []
def bench_once(self, context, num_queries, invalidate_before=False):
for _ in range(self.n):
if invalidate_before:
invalidate_all(db_alias=self.db_alias)
with AssertNumQueries(num_queries, using=self.db_alias):
start = time()
self.query_function(self.db_alias)
end = time()
self.data.append(
{'query': self.query_name,
'time': end - start,
'context': context,
'db': self.db_vendor,
'cache': self.cache_name})
def benchmark(self, query_str, to_list=True, num_queries=1):
self.query_name = query_str
query_str = 'Test.objects.using(using)' + query_str
if to_list:
query_str = 'list(%s)' % query_str
self.query_function = eval('lambda using: ' + query_str)
with override_settings(CACHALOT_ENABLED=False):
self.bench_once(CONTEXTS[0], num_queries)
self.bench_once(CONTEXTS[1], num_queries, invalidate_before=True)
self.bench_once(CONTEXTS[2], 0)
def execute_benchmark(self):
self.benchmark('.count()', to_list=False)
self.benchmark('.first()', to_list=False)
self.benchmark('[:10]')
self.benchmark('[5000:5010]')
self.benchmark(".filter(name__icontains='e')[0:10]")
self.benchmark(".filter(name__icontains='e')[5000:5010]")
self.benchmark(".order_by('owner')[0:10]")
self.benchmark(".order_by('owner')[5000:5010]")
self.benchmark(".select_related('owner')[0:10]")
self.benchmark(".select_related('owner')[5000:5010]")
self.benchmark(".prefetch_related('owner__groups')[0:10]",
num_queries=3)
self.benchmark(".prefetch_related('owner__groups')[5000:5010]",
num_queries=3)
def run(self):
for db_alias in settings.DATABASES:
self.db_alias = db_alias
self.db_vendor = connections[self.db_alias].vendor
print('Benchmarking %s…' % self.db_vendor)
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
self.cache_name = cache.__class__.__name__[:-5].lower()
with override_settings(CACHALOT_CACHE=cache_alias):
self.execute_benchmark()
self.df = pd.DataFrame.from_records(self.data)
if not os.path.exists(RESULTS_PATH):
os.mkdir(RESULTS_PATH)
self.df.to_csv(os.path.join(RESULTS_PATH, 'data.csv'))
self.xlim = (0, self.df['time'].max() * 1.01)
self.output('db')
self.output('cache')
def output(self, param):
gp = self.df.groupby(('context', 'query', param))['time']
self.means = gp.mean().unstack().unstack().reindex(CONTEXTS)
los = self.means - gp.min().unstack().unstack().reindex(CONTEXTS)
ups = gp.max().unstack().unstack().reindex(CONTEXTS) - self.means
self.errors = dict(
(key, dict(
(subkey,
[[los[key][subkey][context] for context in self.means.index],
[ups[key][subkey][context] for context in self.means.index]])
for subkey in self.means.columns.levels[1]))
for key in self.means.columns.levels[0])
self.get_perfs(param)
self.plot_detail(param)
gp = self.df.groupby(('context', param))['time']
self.means = gp.mean().unstack().reindex(CONTEXTS)
los = self.means - gp.min().unstack().reindex(CONTEXTS)
ups = gp.max().unstack().reindex(CONTEXTS) - self.means
self.errors = [
[[los[key][context] for context in self.means.index],
[ups[key][context] for context in self.means.index]]
for key in self.means]
self.plot_general(param)
def get_perfs(self, param):
with io.open(os.path.join(RESULTS_PATH, param + '_results.rst'),
'w') as f:
for v in self.means.columns.levels[0]:
g = self.means[v].mean(axis=1)
        perf = ('%s is %.1f× slower (cold cache), then %.1f× faster (hot cache)'
% (v.ljust(10), g[CONTEXTS[1]] / g[CONTEXTS[0]],
g[CONTEXTS[0]] / g[CONTEXTS[2]]))
print(perf)
f.write('- %s\n' % perf)
def plot_detail(self, param):
for v in self.means.columns.levels[0]:
plt.figure()
axes = self.means[v].plot(
kind='barh', xerr=self.errors[v],
xlim=self.xlim, figsize=(15, 15), subplots=True, layout=(6, 2),
sharey=True, legend=False)
plt.gca().invert_yaxis()
for row in axes:
for ax in row:
ax.set_ylabel('')
ax.set_xlabel('Time (s)')
plt.savefig(os.path.join(RESULTS_PATH, '%s_%s.svg' % (param, v)))
def plot_general(self, param):
plt.figure()
self.means.plot(kind='barh', xerr=self.errors, xlim=self.xlim)
plt.gca().invert_yaxis()
plt.ylabel('')
plt.xlabel('Time (s)')
plt.savefig(os.path.join(RESULTS_PATH, '%s.svg' % param))
def create_data(using):
User.objects.using(using).bulk_create(
[User(username='user%d' % i) for i in range(50)])
Group.objects.using(using).bulk_create(
[Group(name='test%d' % i) for i in range(10)])
groups = list(Group.objects.using(using))
for u in User.objects.using(using):
u.groups.add(choice(groups), choice(groups))
users = list(User.objects.using(using))
Test.objects.using(using).bulk_create(
[Test(name='test%d' % i, owner=choice(users)) for i in range(10000)])
if __name__ == '__main__':
if django.VERSION[:2] >= (1, 7):
django.setup()
write_conditions()
old_db_names = {}
for alias in connections:
conn = connections[alias]
old_db_names[alias] = conn.settings_dict['NAME']
conn.creation.create_test_db(autoclobber=True)
print("Populating database '%s'…" % alias)
create_data(alias)
Benchmark().run()
for alias in connections:
connections[alias].creation.destroy_test_db(old_db_names[alias])
| bsd-3-clause |
scottpurdy/NAB | nab/plot.py | 4 | 17555 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plotting utility."""
import argparse
import itertools
import os
import sys
import tempfile
import pandas as pd
import plotly.offline
import plotly.plotly
from plotly.graph_objs import (
Bar, Data, Figure, Layout, Line, Margin, Marker, Scatter, XAxis, YAxis)
try:
import simplejson as json
except ImportError:
import json
MARKERS = ("circle", "diamond", "square", "cross", "triangle-up", "hexagon",
"triangle-down")
WIDTH = 800
HEIGHT = 500
SCALE = 3.0
def getJSONData(jsonPath):
with open(jsonPath) as f:
dataDict = json.load(f)
return dataDict
def getCSVData(dataPath):
try:
data = pd.read_csv(dataPath)
except IOError("Invalid path to data file."):
return
return data
class PlotNAB(object):
"""Plot NAB data and results files with the plotly API."""
def __init__(self,
apiKey=None,
username=None,
dataFile=None,
dataName=None,
offline=False):
self.offline = offline
if offline:
self.py = plotly.offline
else:
self.py = plotly.plotly
self._plotly_sign_in(self.py, username, apiKey)
self._setupDirectories()
self._getThresholds()
# Setup data
self.dataFile = dataFile
self.dataName = dataName
self.dataPath = os.path.join(self.dataDir, dataFile)
self.rawData = getCSVData(self.dataPath) if self.dataPath else None
@staticmethod
def _plotly_sign_in(py, username=None, apiKey=None):
try:
apiKey = apiKey if apiKey else os.environ["PLOTLY_API_KEY"]
except:
print ("Missing PLOTLY_API_KEY environment variable. If you have a "
"key, set it with $ export PLOTLY_API_KEY=api_key\n"
"You can retrieve a key by registering for the Plotly API at "
"http://www.plot.ly")
raise OSError("Missing API key.")
try:
username = username if username else os.environ["PLOTLY_USERNAME"]
except:
print ("Missing PLOTLY_USERNAME environment variable. If you have a "
"username, set it with $ export PLOTLY_USERNAME=username\n"
"You can sign up for the Plotly API at http://www.plot.ly")
raise OSError("Missing username.")
py.sign_in(username, apiKey)
def _setupDirectories(self):
root = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
self.configDir = os.path.abspath(os.path.join(root, "config"))
self.dataDir = os.path.abspath(os.path.join(root, "data"))
self.labelsDir = os.path.abspath(os.path.join(root, "labels"))
self.resultsDir = os.path.abspath(os.path.join(root, "results"))
def _getThresholds(self):
thresholdsPath = os.path.join(self.configDir, "thresholds.json")
with open(thresholdsPath) as f:
self.thresholds = json.load(f)
@staticmethod
def _addValues(data, start=None, end=None):
"""Return data values trace."""
if start is None:
start = data["timestamp"][0]
if end is None:
end = data["timestamp"].iloc[-1]
mask = ((data["timestamp"] >= start) &
(data["timestamp"] <= end))
return Scatter(x=data["timestamp"][mask],
y=data["value"][mask],
name="value",
line=Line(
width=1.5
),
showlegend=False)
@staticmethod
def _addScores(data, value, title, start=None, end=None):
"""return data values trace."""
if start is None:
start = data["timestamp"][0]
if end is None:
end = data["timestamp"].iloc[-1]
mask = ((data["timestamp"] >= start) &
(data["timestamp"] <= end))
return Scatter(x=data["timestamp"][mask],
y=data[value][mask],
name=title,
showlegend=False)
@staticmethod
def _addLabels(data, labels, target="value", start=None, end=None):
"""return plotly trace for anomaly labels."""
if start is None:
start = data["timestamp"][0]
if end is None:
end = data["timestamp"].iloc[-1]
x = []
y = []
for label in labels:
row = data[data.timestamp == label]
if ((row["timestamp"] >= start).values[0] and
(row["timestamp"] <= end).values[0]):
x.append(row["timestamp"])
y.append(row[target])
if x:
x = pd.concat(x)
y = pd.concat(y)
return Scatter(x=x,
y=y,
mode="markers",
name="Ground Truth Anomaly",
text=["Anomalous Instance"],
marker=Marker(
color="rgb(200, 20, 20)",
size=10,
symbol=MARKERS[0]
))
def _addWindows(self, start=None, end=None):
"""Return plotly trace for anomaly windows."""
if start is None:
start = self.rawData["timestamp"][0]
if end is None:
end = self.rawData["timestamp"].iloc[-1]
mask = ((self.rawData["timestamp"] >= start) &
(self.rawData["timestamp"] <= end))
windows = getJSONData(
os.path.join(self.labelsDir, "combined_windows.json"))[self.dataFile]
x = []
delta = (pd.to_datetime(self.rawData["timestamp"].iloc[1]) -
pd.to_datetime(self.rawData["timestamp"].iloc[0]))
minutes = int(delta.total_seconds() / 60)
for window in windows:
windowStart = max(pd.to_datetime(window[0]), pd.to_datetime(start))
windowEnd = min(pd.to_datetime(window[1]), pd.to_datetime(end))
x.extend(pd.date_range(windowStart, windowEnd, freq=str(minutes) + "Min").tolist())
maxVal = self.rawData.value.max()
y = [maxVal for _ in x]
return Bar(x=x,
y=y,
name="Anomaly Window",
marker=Marker(
color="rgb(220, 100, 100)"
),
opacity=0.3)
def _addProbation(self, start=None, end=None):
if start is None:
start = self.rawData["timestamp"][0]
if end is None:
end = self.rawData["timestamp"].iloc[-1]
mask = ((self.rawData["timestamp"] >= start) &
(self.rawData["timestamp"] <= end))
length = min(int(0.15 * len(self.rawData)), 750)
x = self.rawData["timestamp"].iloc[:length][mask]
y = [self.rawData.value.max() for _ in x]
return Bar(x=x,
y=y,
name="Probationary Period",
marker=Marker(
color="rgb(0, 0, 200)"
),
opacity=0.2)
@staticmethod
def _createLayout(title=None, xLabel="Date", yLabel="Metric", fontSize=12,
width=WIDTH, height=HEIGHT):
"""Return plotly Layout object."""
layoutArgs = {
"title": title,
"font": {"size": fontSize},
"showlegend": False,
"width": width,
"height": height,
"xaxis": XAxis(
title=xLabel,
),
"yaxis": YAxis(
title=yLabel,
domain=[0, 1],
autorange=True,
autotick=True,
),
"barmode": "stack",
"bargap": 0}
margins = {"l": 70, "r": 30, "b": 50, "t": 90, "pad": 4}
if title is None:
margins["t"] -= 70
if fontSize > 12:
margins["l"] += (fontSize - 12) * 3
margins["b"] += (fontSize - 12) * 3
layoutArgs["margin"] = margins
return Layout(**layoutArgs)
def setDataFile(self, filename):
"""Set the data file name; i.e. path from self.dataDir."""
self.dataFile = filename
def setDataName(self, name):
"""Set the name of this data; prints to plot title."""
self.dataName = name
def getDataInfo(self):
"""Return member variables dataFile, dataName, and dataPath."""
return {"dataFile": self.dataFile,
"dataName": self.dataName,
"dataPath": self.dataPath}
def plotMultipleDetectors(self,
resultsPaths,
detectors=["numenta"],
scoreProfile="standard",
withLabels=True,
withWindows=True,
withProbation=True):
"""
Plot detector results on a data file.
TODO: auto-generate paths from dataFile and detectors.
"""
    if scoreProfile not in ("standard",
                            "reward_low_fn_rate",
                            "reward_low_fp_rate"):
      raise ValueError("Invalid scoring profile. Must be one of 'standard', "
                       "'reward_low_fn_rate', or 'reward_low_fp_rate'.")
if self.rawData is None:
self.rawData = getCSVData(self.dataPath)
traces = []
traces.append(self._addValues(self.rawData))
# Anomaly detections traces:
for i,d in enumerate(detectors):
threshold = self.thresholds[d][scoreProfile]["threshold"]
resultsData = getCSVData(os.path.join(self.resultsDir, resultsPaths[i]))
FP, TP = self._parseDetections(resultsData, threshold)
fpTrace, tpTrace = self._addDetections(
"Detection by " + d, MARKERS[i+1], FP, TP)
traces.append(fpTrace)
traces.append(tpTrace)
if withLabels:
labels = getJSONData(os.path.join(
self.labelsDir, "combined_labels.json"))[self.dataFile]
traces.append(self._addLabels(self.rawData, labels, target="value"))
if withWindows:
traces.append(self._addWindows())
if withProbation:
traces.append(self._addProbation())
# Create plotly Data and Layout objects:
data = Data(traces)
layout = self._createLayout("Anomaly Detections for " + self.dataName)
# Query plotly
fig = Figure(data=data, layout=layout)
plot_url = self.py.plot(fig)
print "Detections plot URL: ", plot_url
return plot_url
def plot(self,
value="value",
fontSize=12,
start=None,
end=None,
xLabel=None,
yLabel=None,
width=WIDTH,
height=HEIGHT,
withLabels=False,
withWindows=False,
withProbation=False,
plotPath=None):
"""Plot the data stream."""
if value == "value":
if yLabel is None:
yLabel = "Metric"
elif value == "raw":
value = "raw_score"
if yLabel is None:
yLabel = "Prediction Error"
elif value == "likelihood":
value = "anomaly_score"
if yLabel is None:
yLabel = "Anomaly Likelihood"
else:
raise ValueError("Unknown value type '%s'".format(value))
detector = "numenta"
dataDir, dataFile = os.path.split(self.dataPath)
dataDir = os.path.split(dataDir)[1]
resultsFile = detector + "_" + dataFile
resultsPath = os.path.join(os.path.dirname(__file__), os.path.pardir, "results", detector, dataDir, resultsFile)
resultsData = getCSVData(resultsPath)
traces = []
traces.append(self._addScores(
resultsData, value, yLabel, start, end))
if withLabels:
labels = getJSONData(os.path.join(
self.labelsDir, "combined_labels.json"))[self.dataFile]
traces.append(self._addLabels(resultsData, labels, target=value, start=start, end=end))
if withWindows:
traces.append(self._addWindows(start=start, end=end))
if withProbation:
traces.append(self._addProbation(start=start, end=end))
# Create plotly Data and Layout objects:
data = Data(traces)
layout = self._createLayout(self.dataName, xLabel=xLabel, yLabel=yLabel, fontSize=fontSize, width=width, height=height)
# Query plotly
fig = Figure(data=data, layout=layout)
if plotPath is None:
# We temporarily switch to a temp directory to avoid overwriting the
# previous plot when in offline mode.
cwd = os.getcwd()
tempBase = os.path.join(cwd, "plot_")
tempDir = tempfile.mkdtemp(prefix=tempBase)
try:
os.chdir(tempDir)
plotPath = self.py.plot(fig)
print "Data plot URL: ", plotPath
finally:
os.chdir(cwd)
else:
self.py.image.save_as(fig, plotPath, width=width, height=height, scale=SCALE)
return plotPath
def _parseDetections(self, resultsData, threshold):
"""Return false positives and true positives."""
windows = getJSONData(
os.path.join(self.labelsDir, "combined_windows.json"))[self.dataFile]
detections = resultsData[resultsData["anomaly_score"] >= threshold]
FP = detections[detections["label"] == 0]
TP = []
for window in windows:
start = pd.to_datetime(window[0])
end = pd.to_datetime(window[1])
detection = self.getTPDetection(detections, (start, end))
if detection:
TP.append(detection)
return FP, TP
@staticmethod
def getTPDetection(detections, windowTimes):
"""Returns the first occurence of a detection w/in the window times."""
for detection in detections.iterrows():
detectionTime = pd.to_datetime(detection[1]["timestamp"])
if detectionTime > windowTimes[0] and detectionTime < windowTimes[1]:
return detection
return None
def _addDetections(self, name, symbol, FP, TP):
"""Plot markers at anomaly detections; standard is for open shapes."""
symbol = symbol + "-open"
# FPs:
fpTrace = Scatter(x=FP["timestamp"],
y=FP["value"],
mode="markers",
name=name,
text=["anomalous data"],
marker=Marker(
color="rgb(200, 20, 20)",
size=15.0,
symbol=symbol,
line=Line(
color="rgb(200, 20, 20)",
width=2
)
))
# TPs:
tpTrace = Scatter(x=[tp[1]["timestamp"] for tp in TP],
y=[tp[1]["value"] for tp in TP],
mode="markers",
name=name,
text=["anomalous data"],
marker=Marker(
color="rgb(20, 200, 20)",
size=15.0,
symbol=symbol,
line=Line(
color="rgb(20, 200, 20)",
width=2
)
))
return fpTrace, tpTrace
def main():
"""Command-line script entry point.
Usage:
nab-plot --title="Machine Temperature Sensor Data" realKnownCause/machine_temperature_system_failure.csv
"""
parser = argparse.ArgumentParser()
# Content
parser.add_argument("--value", dest="value", default="value",
choices=("value", "raw", "likelihood"))
parser.add_argument("--start", dest="start", default=None)
parser.add_argument("--end", dest="end", default=None)
parser.add_argument("--labels", dest="labels", action="store_true")
parser.add_argument("--no-labels", dest="labels", action="store_false")
parser.set_defaults(labels=True)
parser.add_argument("--windows", dest="windows", action="store_true")
parser.add_argument("--probation", dest="probation", action="store_true")
# Layout
parser.add_argument("--title", dest="title")
parser.add_argument("--xLabel", default="Date")
parser.add_argument("--no-xLabel", dest="xLabel", action="store_const",
const=None)
parser.add_argument("--yLabel", dest="yLabel")
parser.add_argument("--fontSize", dest="fontSize", default=12, type=int, required=False)
parser.add_argument("--width", dest="width", default=WIDTH, type=int)
parser.add_argument("--height", default=HEIGHT, type=int)
# Misc.
parser.add_argument("--offline", dest="offline", action="store_true")
parser.add_argument("--output", dest="output", default=None)
# Which data set to plot
parser.add_argument("file")
args = parser.parse_args()
if args.offline and args.output is not None:
print "Plots cannot be saved to file in offline mode."
sys.exit(-1)
path = args.file
title = args.title
labels = args.labels
windows = args.windows
probation = args.probation
offline = args.offline
output = args.output
dataPlotter = PlotNAB(dataFile=path, dataName=title, offline=offline)
dataPlotter.plot(
value=args.value,
fontSize=args.fontSize,
start=args.start,
end=args.end,
xLabel=args.xLabel,
yLabel=args.yLabel,
width=args.width,
height=args.height,
withLabels=labels,
withWindows=windows,
withProbation=probation,
plotPath=output,
)
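# CLI sketch (added; the data file and title are only examples, and the flags
# map onto the argparse definitions in main() above):
#
#   nab-plot --value=likelihood --windows --probation \
#            --title="Machine Temperature" \
#            realKnownCause/machine_temperature_system_failure.csv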
| agpl-3.0 |
klocey/hydrobide | figure_code/MacroecologyPatterns/DiversityAbundanceScaling.py | 8 | 3887 | from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import sys
import scipy as sc
from scipy import stats
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import summary_table
mydir = os.path.expanduser('~/GitHub/Emergence')
sys.path.append(mydir+'/tools')
mydir2 = os.path.expanduser("~/")
def xfrm(X, _max): return -np.log10(_max - np.array(X))
def figplot(x, y, xlab, ylab, fig, n, binned = 1):
'''main figure plotting function'''
fig.add_subplot(2, 2, n)
y2 = list(y)
x2 = list(x)
if binned == 1:
X, Y = (np.array(t) for t in zip(*sorted(zip(x2, y2))))
Xi = xfrm(X, max(X)*1.05)
bins = np.linspace(np.min(Xi), np.max(Xi)+1, 100)
ii = np.digitize(Xi, bins)
y2 = np.array([np.mean(Y[ii==i]) for i in range(1, len(bins)) if len(Y[ii==i]) > 0])
x2 = np.array([np.mean(X[ii==i]) for i in range(1, len(bins)) if len(X[ii==i]) > 0])
d = pd.DataFrame({'x': list(x2)})
d['y'] = list(y2)
f = smf.ols('y ~ x', d).fit()
m, b, r, p, std_err = stats.linregress(x2, y2)
st, data, ss2 = summary_table(f, alpha=0.05)
fitted = data[:,2]
mean_ci_low, mean_ci_upp = data[:,4:6].T
ci_low, ci_upp = data[:,6:8].T
x2, y2, fitted, ci_low, ci_upp = zip(*sorted(zip(x2, y2, fitted, ci_low, ci_upp)))
if n == 1:
lbl = r'$rarity$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' +str(round(r**2,2))
elif n == 2:
lbl = r'$Nmax$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' +str(round(r**2,2))
elif n == 3:
lbl = r'$Ev$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+ r'$r^2$' + '=' + str(round(r**2,2))
elif n == 4:
lbl = r'$S$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' + str(round(r**2,2))
plt.scatter(x2, y2, color = 'SkyBlue', alpha= 1 , s = 12, linewidths=0.5, edgecolor='Steelblue', label=lbl)
if n == 3:
plt.legend(loc='best', fontsize=6, frameon=False)
else:
plt.legend(loc=2, fontsize=6, frameon=False)
plt.fill_between(x2, ci_upp, ci_low, color='b', lw=0.1, alpha=0.15)
plt.plot(x2, fitted, color='b', ls='--', lw=1.0, alpha=0.9)
plt.xlabel(xlab, fontsize=11)
plt.ylabel(ylab, fontsize=11)
plt.tick_params(axis='both', labelsize=6)
plt.xlim(0.9*min(x2), 1.1*max(x2))
plt.ylim(min(ci_low), max(ci_upp))
return fig
df = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
df2 = pd.DataFrame({'length' : df['length']})
df2['N'] = np.log10(df['total.abundance'].groupby(df['sim']).max())
df2['D'] = np.log10(df['N.max'].groupby(df['sim']).max())
df2['S'] = np.log10(df['species.richness'].groupby(df['sim']).max())
df2['E'] = np.log10(df['simpson.e'].groupby(df['sim']).min())
df2['R'] = np.log10(df['logmod.skew'].groupby(df['sim']).max())
df2 = df2.replace([np.inf, -np.inf], np.nan).dropna()
fig = plt.figure(figsize=(5, 4))
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Rarity, '+r'$log_{10}$'
fig = figplot(df2['N'], df2['R'], xlab, ylab, fig, 1)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Dominance, '+r'$log_{10}$'
fig = figplot(df2['N'], df2['D'], xlab, ylab, fig, 2)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Evenness, ' +r'$log_{10}$'
fig = figplot(df2['N'], df2['E'], xlab, ylab, fig, 3)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Richness, ' +r'$log_{10}$'
fig = figplot(df2['N'], df2['S'], xlab, ylab, fig, 4)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/DiversityAbundanceScaling.png', dpi=600, bbox_inches = "tight")
plt.close()
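# Note (added for clarity): figplot regresses log10(y) on log10(N), so a fitted
# line log10(y) = m*log10(N) + b back-transforms to the power law
# y = 10**b * N**m, which is exactly what the legend labels report via
# round(10**b, 2) and the exponent round(m, 2).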
| mit |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/IPython/terminal/ipapp.py | 5 | 13824 | #!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~IPython.core.application.Application` object for the command
line :command:`ipython` program.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
from traitlets.config.loader import Config
from traitlets.config.application import boolean_flag, catch_config_error, Application
from IPython.core import release
from IPython.core import usage
from IPython.core.completer import IPCompleter
from IPython.core.crashhandler import CrashHandler
from IPython.core.formatters import PlainTextFormatter
from IPython.core.history import HistoryManager
from IPython.core.prompts import PromptManager
from IPython.core.application import (
ProfileDir, BaseIPythonApplication, base_flags, base_aliases
)
from IPython.core.magics import ScriptMagics
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.extensions.storemagic import StoreMagics
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.utils import warn
from IPython.paths import get_ipython_dir
from traitlets import (
Bool, List, Dict,
)
#-----------------------------------------------------------------------------
# Globals, utilities and helpers
#-----------------------------------------------------------------------------
_examples = """
ipython --matplotlib # enable matplotlib integration
ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
ipython --log-level=DEBUG # set logging to DEBUG
ipython --profile=foo # start with profile foo
ipython profile create foo # create profile foo w/ default config files
ipython help profile # show the help for the profile subcmd
ipython locate # print the path to the IPython directory
ipython locate profile foo # print the path to the directory for profile `foo`
"""
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class IPAppCrashHandler(CrashHandler):
"""sys.excepthook for IPython itself, leaves a detailed report on disk."""
def __init__(self, app):
contact_name = release.author
contact_email = release.author_email
bug_tracker = 'https://github.com/ipython/ipython/issues'
super(IPAppCrashHandler,self).__init__(
app, contact_name, contact_email, bug_tracker
)
def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
# Start with parent report
report = [super(IPAppCrashHandler, self).make_report(traceback)]
# Add interactive-specific info we may have
rpt_add = report.append
try:
rpt_add(sec_sep+"History of session input:")
for line in self.app.shell.user_ns['_ih']:
rpt_add(line)
rpt_add('\n*** Last line of input (may not be in above history):\n')
rpt_add(self.app.shell._last_input_line+'\n')
except:
pass
return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags.update(shell_flags)
frontend_flags = {}
addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
'Turn on auto editing of files with syntax errors.',
'Turn off auto editing of files with syntax errors.'
)
addflag('banner', 'TerminalIPythonApp.display_banner',
"Display a banner upon starting IPython.",
"Don't display a banner upon starting IPython."
)
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
"""Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
"Don't prompt the user when exiting."
)
addflag('term-title', 'TerminalInteractiveShell.term_title',
"Enable auto setting the terminal title.",
"Disable auto setting the terminal title."
)
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.PromptManager.in_template = '>>> '
classic_config.PromptManager.in2_template = '... '
classic_config.PromptManager.out_template = ''
classic_config.InteractiveShell.separate_in = ''
classic_config.InteractiveShell.separate_out = ''
classic_config.InteractiveShell.separate_out2 = ''
classic_config.InteractiveShell.colors = 'NoColor'
classic_config.InteractiveShell.xmode = 'Plain'
frontend_flags['classic']=(
classic_config,
"Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class LocateIPythonApp(BaseIPythonApplication):
description = """print the path to the IPython dir"""
subcommands = Dict(dict(
profile=('IPython.core.profileapp.ProfileLocate',
"print the path to an IPython profile directory",
),
))
def start(self):
if self.subapp is not None:
return self.subapp.start()
else:
print(self.ipython_dir)
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
name = u'ipython'
description = usage.cl_usage
crash_handler_class = IPAppCrashHandler
examples = _examples
flags = Dict(flags)
aliases = Dict(aliases)
classes = List()
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
PromptManager,
HistoryManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
StoreMagics,
]
deprecated_subcommands = dict(
qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
),
notebook=('notebook.notebookapp.NotebookApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
),
console=('jupyter_console.app.ZMQTerminalIPythonApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
),
nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
"DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
),
trust=('nbformat.sign.TrustNotebookApp',
"DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
),
kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
"DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
),
)
subcommands = dict(
profile = ("IPython.core.profileapp.ProfileApp",
"Create and manage IPython profiles."
),
kernel = ("ipykernel.kernelapp.IPKernelApp",
"Start a kernel without an attached frontend."
),
locate=('IPython.terminal.ipapp.LocateIPythonApp',
LocateIPythonApp.description
),
history=('IPython.core.historyapp.HistoryApp',
"Manage the IPython history database."
),
)
deprecated_subcommands['install-nbextension'] = (
"notebook.nbextensions.InstallNBExtensionApp",
"DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
)
subcommands.update(deprecated_subcommands)
# *do* autocreate requested profile, but don't create the config file.
auto_create=Bool(True)
# configurables
quick = Bool(False, config=True,
help="""Start IPython quickly by skipping the loading of config files."""
)
def _quick_changed(self, name, old, new):
if new:
self.load_config_file = lambda *a, **kw: None
display_banner = Bool(True, config=True,
help="Whether to display a banner upon starting IPython."
)
# if there is code or files to run from the cmd line, don't interact
# unless the --i flag (App.force_interact) is true.
force_interact = Bool(False, config=True,
help="""If a command or file is given via the command-line,
e.g. 'ipython foo.py', start an interactive shell after executing the
file or command."""
)
def _force_interact_changed(self, name, old, new):
if new:
self.interact = True
def _file_to_run_changed(self, name, old, new):
if new:
self.something_to_run = True
if new and not self.force_interact:
self.interact = False
_code_to_run_changed = _file_to_run_changed
_module_to_run_changed = _file_to_run_changed
# internal, not-configurable
interact=Bool(True)
something_to_run=Bool(False)
def parse_command_line(self, argv=None):
"""override to allow old '-pylab' flag with deprecation warning"""
argv = sys.argv[1:] if argv is None else argv
if '-pylab' in argv:
# deprecated `-pylab` given,
# warn and transform into current syntax
argv = argv[:] # copy, don't clobber
idx = argv.index('-pylab')
warn.warn("`-pylab` flag has been deprecated.\n"
" Use `--matplotlib <backend>` and import pylab manually.")
argv[idx] = '--pylab'
return super(TerminalIPythonApp, self).parse_command_line(argv)
@catch_config_error
def initialize(self, argv=None):
"""Do actions after construct, but before starting the app."""
super(TerminalIPythonApp, self).initialize(argv)
if self.subapp is not None:
# don't bother initializing further, starting subapp
return
# print self.extra_args
if self.extra_args and not self.something_to_run:
self.file_to_run = self.extra_args[0]
self.init_path()
# create the shell
self.init_shell()
# and draw the banner
self.init_banner()
# Now a variety of things that happen after the banner is printed.
self.init_gui_pylab()
self.init_extensions()
self.init_code()
def init_shell(self):
"""initialize the InteractiveShell instance"""
# Create an InteractiveShell instance.
# shell.display_banner should always be False for the terminal
# based app, because we call shell.show_banner() by hand below
# so the banner shows *before* all extension loading stuff.
self.shell = TerminalInteractiveShell.instance(parent=self,
display_banner=False, profile_dir=self.profile_dir,
ipython_dir=self.ipython_dir, user_ns=self.user_ns)
self.shell.configurables.append(self)
def init_banner(self):
"""optionally display the banner"""
if self.display_banner and self.interact:
self.shell.show_banner()
# Make sure there is a space below the banner.
if self.log_level <= logging.INFO: print()
def _pylab_changed(self, name, old, new):
"""Replace --pylab='inline' with --pylab='auto'"""
if new == 'inline':
warn.warn("'inline' not available as pylab backend, "
"using 'auto' instead.")
self.pylab = 'auto'
def start(self):
if self.subapp is not None:
return self.subapp.start()
# perform any prexec steps:
if self.interact:
self.log.debug("Starting IPython's mainloop...")
self.shell.mainloop()
else:
self.log.debug("IPython not interactive...")
def load_default_config(ipython_dir=None):
"""Load the default config file from the default ipython_dir.
This is useful for embedded shells.
"""
if ipython_dir is None:
ipython_dir = get_ipython_dir()
profile_dir = os.path.join(ipython_dir, 'profile_default')
config = Config()
for cf in Application._load_config_files("ipython_config", path=profile_dir):
config.update(cf)
return config
launch_new_instance = TerminalIPythonApp.launch_instance
if __name__ == '__main__':
launch_new_instance()
| gpl-3.0 |
astocko/statsmodels | examples/incomplete/arima.py | 34 | 1605 | from __future__ import print_function
from statsmodels.datasets.macrodata import load_pandas
from statsmodels.tsa.base.datetools import dates_from_range
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
plt.interactive(False)
# let's examine an ARIMA model of CPI
cpi = load_pandas().data['cpi']
dates = dates_from_range('1959q1', '2009q3')
cpi.index = dates
res = ARIMA(cpi, (1, 1, 1), freq='Q').fit()
print(res.summary())
# we can look at the series
cpi.diff().plot()
# maybe logs are better
log_cpi = np.log(cpi)
# check the ACF and PCF plots
acf, confint_acf = sm.tsa.acf(log_cpi.diff().values[1:], confint=95)
# center the confidence intervals about zero
#confint_acf -= confint_acf.mean(1)[:, None]
pacf = sm.tsa.pacf(log_cpi.diff().values[1:], method='ols')
# confidence interval is now an option to pacf
from scipy import stats
confint_pacf = stats.norm.ppf(1 - .025) * np.sqrt(1 / 202.)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.set_title('Autocorrelation')
ax.plot(range(41), acf, 'bo', markersize=5)
ax.vlines(range(41), 0, acf)
ax.fill_between(range(41), confint_acf[:, 0], confint_acf[:, 1], alpha=.25)
fig.tight_layout()
ax = fig.add_subplot(122, sharey=ax)
ax.vlines(range(41), 0, pacf)
ax.plot(range(41), pacf, 'bo', markersize=5)
ax.fill_between(range(41), -confint_pacf, confint_pacf, alpha=.25)
#NOTE: you'll be able to just to this when tsa-plots is in master
#sm.graphics.acf_plot(x, nlags=40)
#sm.graphics.pacf_plot(x, nlags=40)
# still some seasonality
# try an arma(1, 1) with ma(4) term
| bsd-3-clause |
leggitta/mne-python | mne/tests/test_report.py | 9 | 8943 | # Authors: Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
import warnings
import shutil
from nose.tools import assert_true, assert_equal, assert_raises
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import Raw
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report.
"""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = Raw(raw_fname_new)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
fnames = glob.glob(op.join(tempdir, '*.raw')) + \
glob.glob(op.join(tempdir, '*.raw'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section.
"""
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report.
"""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report.
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report.
"""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_true(html in html_compare)
def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
| bsd-3-clause |
capitancambio/scikit-neuralnetwork | sknn/mlp.py | 1 | 20512 | from __future__ import (absolute_import, unicode_literals, print_function)
__all__ = ['MultiLayerPerceptronRegressor', 'MultiLayerPerceptronClassifier']
import os
import time
import logging
import itertools
log = logging.getLogger('sknn')
# By default, we force Theano to use a GPU and fallback to CPU, using 32-bits.
# This must be done in the code before Theano is imported for the first time.
os.environ['THEANO_FLAGS'] = "device=gpu,floatX=float32"
cuda = logging.getLogger('theano.sandbox.cuda')
cuda.setLevel(logging.CRITICAL)
import theano
cuda.setLevel(logging.WARNING)
import numpy
import sklearn.base
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.cross_validation
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.training_algorithms import sgd
from pylearn2.models import mlp, maxout
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms.learning_rule import RMSProp, Momentum
from pylearn2.space import Conv2DSpace
from pylearn2.termination_criteria import MonitorBased
class ansi:
BOLD = '\033[1;97m'
WHITE = '\033[0;97m'
BLUE = '\033[0;94m'
GREEN = '\033[0;32m'
ENDC = '\033[0m'
class BaseMLP(sklearn.base.BaseEstimator):
"""
Abstract base class for wrapping the multi-layer perceptron functionality
from PyLearn2.
Parameters
----------
layers : list of tuples
An iterable sequence of layers, each given as a tuple: first the
activation type and then optional parameters such as the number of
units.
* For hidden layers, you can use the following layer types:
``Rectifier``, ``Sigmoid``, ``Tanh``, ``Maxout`` or ``Convolution``.
* For output layers, you can use the following layer types:
``Linear``, ``Softmax`` or ``Gaussian``.
You must specify at least an output layer, so the last tuple in your
layers input should contain ``Linear`` (for example).
random_state : int
Seed for the initialization of the neural network parameters (e.g.
weights and biases). This is fully deterministic.
learning_rule : str
Name of the learning rule used during stochastic gradient descent,
one of ('sgd', 'momentum', 'rmsprop') at the moment.
learning_rate : float
Real number indicating the default/starting rate of adjustment for
the weights during gradient descent. Different learning rules may
take this into account differently.
learning_momentum : float
Real number indicating the momentum factor to be used for the
learning rule 'momentum'.
batch_size : int
Number of training samples to group together when performing stochastic
gradient descent. By default each sample is treated on its own.
n_iter : int
The number of iterations of gradient descent to perform on the
neural network's weights when training with fit().
valid_set : tuple of array-like
Validation set (X_v, y_v) to be used explicitly while training. Both
arrays should have the same size for the first dimension, and the second
dimension should match the training data specified in fit().
valid_size : float
Ratio of the training data to be used for validation. 0.0 means no
validation, and 1.0 would mean there's no training data! Common values are
0.1 or 0.25.
n_stable : int
Number of iterations after which training should return when the validation
error remains constant. This is a sign that the data has been fitted.
f_stable : float
Threshold under which the validation error change is assumed to be stable, to
be used in combination with `n_stable`.
dropout : bool
Whether to use drop-out training for the inputs (jittering) and the
hidden layers, for each training example.
verbose : bool
If True, print the score at each epoch via the logger called 'sknn'. You can
control the detail of the output by customising the logger level and formatter.
"""
def __init__(
self,
layers,
random_state=None,
learning_rule='sgd',
learning_rate=0.01,
learning_momentum=0.9,
batch_size=1,
n_iter=None,
valid_set=None,
valid_size=0.0,
n_stable=50,
f_stable=0.001,
dropout=False,
verbose=False):
self.layers = layers
self.seed = random_state
self.verbose = verbose
self.unit_counts = None
self.mlp = None
self.ds = None
self.trainer = None
self.f = None
self.train_set = None
self.cost = "Dropout" if dropout else None
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.n_stable = n_stable
self.f_stable = f_stable
self.valid_set = valid_set
self.valid_size = valid_size
self.best_valid_error = float("inf")
if learning_rule == 'sgd':
self.learning_rule = None
elif learning_rule == 'momentum':
self.learning_rule = Momentum(learning_momentum)
elif learning_rule == 'rmsprop':
self.learning_rule = RMSProp()
#accept the learning rule if it's not a string
#otherwise it fails when cloning classifiers during cross validation
elif not isinstance(learning_rule,str):
self.learning_rule=learning_rule
else:
raise NotImplementedError(
"Learning rule type `%s` is not supported." % learning_rule)
self._setup()
def _setup(self):
# raise NotImplementedError("BaseMLP is an abstract class; "
# "use the Classifier or Regressor instead.")
pass
def _create_trainer(self, dataset):
sgd.log.setLevel(logging.WARNING)
if self.cost == "Dropout":
first_hidden_name = "Hidden_0_"+self.layers[0][0]
self.cost = Dropout(
input_include_probs={first_hidden_name: 1.0},
input_scales={first_hidden_name: 1.})
logging.getLogger('pylearn2.monitor').setLevel(logging.WARNING)
if dataset is not None:
termination_criterion = MonitorBased(
channel_name='objective',
N=self.n_stable,
prop_decrease=self.f_stable)
else:
termination_criterion = None
return sgd.SGD(
cost=self.cost,
batch_size=self.batch_size,
learning_rule=self.learning_rule,
learning_rate=self.learning_rate,
termination_criterion=termination_criterion,
monitoring_dataset=dataset)
def _create_hidden_layer(self, name, args, irange=0.1):
activation_type = args[0]
if activation_type == "Rectifier":
return mlp.RectifiedLinear(
layer_name=name,
dim=args[1],
irange=irange)
if activation_type == "Sigmoid":
return mlp.Sigmoid(
layer_name=name,
dim=args[1],
irange=irange)
if activation_type == "Tanh":
return mlp.Tanh(
layer_name=name,
dim=args[1],
irange=irange)
if activation_type == "Maxout":
return maxout.Maxout(
layer_name=name,
num_units=args[1],
num_pieces=args[2],
irange=irange)
if activation_type == "Convolution":
return mlp.ConvRectifiedLinear(
layer_name=name,
output_channels=args[1],
kernel_shape=args[2],
pool_shape=(1,1),
pool_stride=(1,1),
irange=irange)
raise NotImplementedError(
"Hidden layer type `%s` is not implemented." % activation_type)
def _create_output_layer(self, name, args):
activation_type = args[0]
fan_in = self.unit_counts[-2]
fan_out = self.unit_counts[-1]
lim = numpy.sqrt(6) / (numpy.sqrt(fan_in + fan_out))
if activation_type == "Linear":
return mlp.Linear(
dim=args[1],
layer_name=name,
irange=lim)
if activation_type == "Gaussian":
return mlp.LinearGaussian(
init_beta=0.1,
min_beta=0.001,
max_beta=1000,
beta_lr_scale=None,
dim=args[1],
layer_name=name,
irange=lim)
if activation_type == "Softmax":
return mlp.Softmax(
layer_name=name,
n_classes=args[1],
irange=lim)
raise NotImplementedError(
"Output layer type `%s` is not implemented." % activation_type)
def _create_mlp(self, X, y, nvis=None, input_space=None):
# Create the layers one by one, connecting to previous.
mlp_layers = []
for i, layer in enumerate(self.layers[:-1]):
fan_in = self.unit_counts[i]
fan_out = self.unit_counts[i + 1]
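# Glorot/Xavier-style uniform limit sqrt(6 / (fan_in + fan_out)), rescaled per activation type below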
lim = numpy.sqrt(6) / numpy.sqrt(fan_in + fan_out)
if layer[0] == "Tanh":
lim *= 1.1 * lim
elif layer[0] in ("Rectifier", "Maxout", "Convolution"):
# He, Rang, Zhen and Sun, converted to uniform.
lim *= numpy.sqrt(2)
elif layer[0] == "Sigmoid":
lim *= 4
layer_name = "Hidden_%i_%s" % (i, layer[0])
hidden_layer = self._create_hidden_layer(layer_name, layer, irange=lim)
mlp_layers.append(hidden_layer)
# Deal with output layer as a special case.
output_layer_info = list(self.layers[-1])
output_layer_info.append(self.unit_counts[-1])
output_layer_name = "Output_%s" % output_layer_info[0]
output_layer = self._create_output_layer(output_layer_name, output_layer_info)
mlp_layers.append(output_layer)
return mlp.MLP(
mlp_layers,
nvis=nvis,
seed=self.seed,
input_space=input_space)
def _initialize(self, X, y):
assert not self.is_initialized,\
"This neural network has already been initialized."
log.info(
"Initializing neural network with %i layers, %i inputs and %i outputs.",
len(self.layers), X.shape[1], y.shape[1])
# Calculate and store all layer sizes.
self.layers[-1] = (self.layers[-1][0], y.shape[1])
self.unit_counts = [X.shape[1]]
for layer in self.layers:
self.unit_counts += [layer[1]]
log.debug(" - Type: {}{: <10}{} Units: {}{: <4}{}".format(
ansi.BOLD, layer[0], ansi.ENDC, ansi.BOLD, layer[1], ansi.ENDC))
log.debug("")
if self.valid_size > 0.0:
assert self.valid_set is None, "Can't specify valid_size and valid_set together."
X, X_v, y, y_v = sklearn.cross_validation.train_test_split(
X, y,
test_size=self.valid_size,
random_state=self.seed)
self.valid_set = X_v, y_v
self.train_set = X, y
# Convolution networks need a custom input space.
if self.is_convolution:
nvis = None
input_space = Conv2DSpace(shape=X.shape[1:], num_channels=1)
view = input_space.get_origin_batch(100)
self.ds = DenseDesignMatrix(topo_view=view, y=y)
else:
nvis = self.unit_counts[0]
input_space = None
self.ds = DenseDesignMatrix(X=X, y=y)
if self.valid_set:
X_v, y_v = self.valid_set
self.vs = DenseDesignMatrix(X=X_v, y=y_v)
else:
self.vs = None
if self.mlp is None:
self.mlp = self._create_mlp(X, y, input_space=input_space, nvis=nvis)
self.trainer = self._create_trainer(self.vs)
self.trainer.setup(self.mlp, self.ds)
inputs = self.mlp.get_input_space().make_theano_batch()
self.f = theano.function([inputs], self.mlp.fprop(inputs))
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return not (self.ds is None or self.trainer is None or self.f is None)
@property
def is_convolution(self):
"""Check whether this neural network includes convolution layers.
"""
return "Conv" in self.layers[0][0]
def __getstate__(self):
assert self.mlp is not None,\
"The neural network has not been initialized."
d = self.__dict__.copy()
for k in ['ds', 'f', 'trainer']:
if k in d:
del d[k]
return d
def __setstate__(self, d):
self.__dict__.update(d)
for k in ['ds', 'f', 'trainer']:
setattr(self, k, None)
def _fit(self, X, y, test=None):
assert X.shape[0] == y.shape[0],\
"Expecting same number of input and output samples."
num_samples, data_size = X.shape[0], X.size+y.size
if y.ndim == 1:
y = y.reshape((y.shape[0], 1))
if not isinstance(X, numpy.ndarray):
X = X.toarray()
if not isinstance(y, numpy.ndarray):
y = y.toarray()
if not self.is_initialized:
self._initialize(X, y)
X, y = self.train_set
else:
self.train_set = X, y
if self.is_convolution:
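# add a singleton channel axis at the end, then flatten the topological view into a design matrix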
X = numpy.array([X]).transpose(1,2,3,0)
X = self.ds.view_converter.topo_view_to_design_mat(X)
self.ds.X, self.ds.y = X, y
# Bug in PyLearn2 that has some unicode channels, can't sort.
self.mlp.monitor.channels = {str(k): v for k, v in self.mlp.monitor.channels.items()}
log.info("Training on dataset of {:,} samples with {:,} total size.".format(num_samples, data_size))
if self.valid_set:
X_v, _ = self.valid_set
log.debug(" - Train: {: <9,} Valid: {: <4,}".format(X.shape[0], X_v.shape[0]))
if self.n_iter:
log.debug(" - Terminating loop after {} total iterations.".format(self.n_iter))
if self.n_stable:
log.debug(" - Early termination after {} stable iterations.".format(self.n_stable))
log.debug("""
Epoch Validation Error Time
---------------------------------""")
for i in itertools.count(0):
start = time.time()
self.trainer.train(dataset=self.ds)
self.mlp.monitor.report_epoch()
self.mlp.monitor()
if not self.trainer.continue_learning(self.mlp):
log.debug("")
log.info("Early termination condition fired at %i iterations.", i)
break
if self.n_iter is not None and i >= self.n_iter:
log.debug("")
log.info("Terminating after specified %i total iterations.", i)
break
if self.verbose:
objective = self.mlp.monitor.channels.get('objective', None)
if objective:
avg_valid_error = objective.val_shared.get_value()
self.best_valid_error = min(self.best_valid_error, avg_valid_error)
else:
avg_valid_error = None
best_valid = bool(self.best_valid_error == avg_valid_error)
log.debug("{:>5} {}{}{} {:>3.1f}s".format(
i,
ansi.GREEN if best_valid else "",
"{:>10.6f}".format(float(avg_valid_error)) if avg_valid_error else " N/A ",
ansi.ENDC if best_valid else "",
time.time() - start
))
return self
def _predict(self, X):
if not self.is_initialized:
raise ValueError("The neural network has not been trained.")
if X.dtype != numpy.float32:
X = X.astype(numpy.float32)
if not isinstance(X, numpy.ndarray):
X = X.toarray()
if self.is_convolution:
X = numpy.array([X]).transpose(1,2,3,0)
return self.f(X)
class MultiLayerPerceptronRegressor(BaseMLP, sklearn.base.RegressorMixin):
"""Regressor compatible with sklearn that wraps PyLearn2.
"""
def fit(self, X, y):
"""Fit the neural network to the given data.
Parameters
----------
X : array-like, shape (n_samples, n_inputs)
Training vectors as real numbers, where n_samples is the number of
samples and n_inputs is the number of input features.
y : array-like, shape (n_samples, n_outputs)
Target values as real numbers, either as regression targets or
label probabilities for classification.
Returns
-------
self : object
Returns this instance.
"""
return super(MultiLayerPerceptronRegressor, self)._fit(X, y)
def predict(self, X):
"""Calculate predictions for specified inputs.
Parameters
----------
X : array-like, shape (n_samples, n_inputs)
The input samples as real numbers.
Returns
-------
y : array, shape (n_samples, n_outputs)
The predicted values as real numbers.
"""
return super(MultiLayerPerceptronRegressor, self)._predict(X)
class MultiLayerPerceptronClassifier(BaseMLP, sklearn.base.ClassifierMixin):
"""Classifier compatible with sklearn that wraps PyLearn2.
"""
def _setup(self):
# WARNING: Unfortunately, sklearn's LabelBinarizer handles binary data
# as a special case and encodes it very differently to multiclass cases.
# In our case, we want to have 2D outputs when there are 2 classes, or
# the predicted probabilities (e.g. Softmax) will be incorrect.
# The LabelBinarizer is also implemented in a way that this cannot be
# customized without a providing a complete rewrite, so here we patch
# the `type_of_target` function for this to work correctly,
import sklearn.preprocessing.label as L
L.type_of_target = lambda _: "multiclass"
self.label_binarizer = sklearn.preprocessing.LabelBinarizer()
def fit(self, X, y):
# check now for correct shapes
assert X.shape[0] == y.shape[0],\
"Expecting same number of input and output samples."
# Scan training samples to find all different classes.
self.label_binarizer.fit(y)
yp = self.label_binarizer.transform(y)
# Now train based on a problem transformed into regression.
return super(MultiLayerPerceptronClassifier, self)._fit(X, yp, test=y)
def partial_fit(self, X, y, classes=None):
if classes is not None:
self.label_binarizer.fit(classes)
return self.fit(X, y)
def predict_proba(self, X):
"""Calculate probability estimates based on these input features.
Parameters
----------
X : array-like of shape [n_samples, n_features]
The input data as a numpy array.
Returns
-------
y_prob : array-like of shape [n_samples, n_classes]
The predicted probability of the sample for each class in the
model, in the same order as the classes.
"""
proba = super(MultiLayerPerceptronClassifier, self)._predict(X)
return proba / proba.sum(1, keepdims=True)
def predict(self, X):
"""Predict class by converting the problem to a regression problem.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The predicted classes, or the predicted values.
"""
y = self.predict_proba(X)
return self.label_binarizer.inverse_transform(y, threshold=0.5)
| bsd-3-clause |
TeamHG-Memex/eli5 | tests/utils.py | 1 | 4565 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import inspect
import json
from pprint import pprint
from hypothesis.strategies import integers
from hypothesis.extra.numpy import arrays
import numpy as np
from eli5.base import Explanation
from eli5.formatters import format_as_text, format_as_html, format_as_dict
from eli5.formatters.html import html_escape
from eli5.formatters.text import format_signed
from eli5.sklearn.utils import sklearn_version
SGD_KWARGS = {'random_state': 42}
if sklearn_version() >= '0.19':
SGD_KWARGS['tol'] = 1e-3
def rnd_len_arrays(dtype, min_len=0, max_len=3, elements=None):
""" Generate numpy arrays of random length """
lengths = integers(min_value=min_len, max_value=max_len)
return lengths.flatmap(lambda n: arrays(dtype, n, elements=elements))
def format_as_all(res, clf, **kwargs):
""" Format explanation as text and html, check JSON-encoding,
print text explanation, save html, return text and html.
"""
expl_dict = format_as_dict(res)
pprint(expl_dict)
json.dumps(expl_dict) # check that it can be serialized to JSON
expl_text = format_as_text(res, **kwargs)
expl_html = format_as_html(res, **kwargs)
print(expl_text)
write_html(clf, expl_html, expl_text, caller_depth=2)
return expl_text, expl_html
def strip_blanks(html):
""" Remove whitespace and line breaks from html.
"""
return html.replace(' ', '').replace('\n', '')
def write_html(clf, html, text, postfix='', caller_depth=1):
""" Write to html file in .html directory. Filename is generated from calling
function name and module, and clf class name.
This is useful to check and debug format_as_html function.
"""
caller = inspect.stack()[caller_depth]
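# inspect.stack() entries are named FrameInfo objects on Python 3; fall back to tuple indexing on Python 2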
try:
test_name, test_file = caller.function, caller.filename
except AttributeError:
test_name, test_file = caller[3], caller[1]
test_file = os.path.basename(test_file).rsplit('.', 1)[0]
filename = '{}_{}_{}{}.html'.format(
test_file, test_name, clf.__class__.__name__, postfix)
dirname = '.html'
if not os.path.isdir(dirname):
os.mkdir(dirname)
path = os.path.join(dirname, filename)
with open(path, 'wb') as f:
f.write(u'Text:<pre>{text}</pre>End of text<hr/>\n{html}'
.format(text=html_escape(text), html=html)
.encode('utf8'))
print('html written to {}'.format(path))
def get_all_features(feature_weights, with_weights=False):
""" Collect a dict of all features and their weights.
"""
features = {}
for fw in feature_weights:
if isinstance(fw.feature, list):
features.update((f['name'], fw.weight) for f in fw.feature)
else:
features[fw.feature] = fw.weight
return features if with_weights else set(features)
def get_names_coefs(feature_weights):
return [(format_signed(fw.feature[0]) if isinstance(fw.feature, list)
else fw.feature,
fw.weight)
for fw in feature_weights]
def check_targets_scores(explanation, atol=1e-8):
# type: (Explanation, float) -> None
""" Check that feature weights sum to target score or proba,
if both proba and score are present they match,
and that there are no "remaining" features.
"""
targets = explanation.targets
for target in targets:
weights = target.feature_weights
# else the check is invalid
assert weights.neg_remaining == weights.pos_remaining == 0
weights_sum = (sum(fw.weight for fw in weights.pos) +
sum(fw.weight for fw in weights.neg))
expected = target.score if target.score is not None else target.proba
assert np.isclose(abs(expected), abs(weights_sum), atol=atol), \
(expected, weights_sum)
if any(t.score is not None for t in targets):
if len(targets) == 1 and targets[0].proba is not None:
target = targets[0]
# one target with proba => assume sigmoid
proba = 1. / (1 + np.exp(-target.score))
assert np.isclose(target.proba, proba, atol=atol) or \
np.isclose(target.proba, 1-proba, atol=atol)
elif any(t.proba is not None for t in targets):
# many targets with proba => assume softmax
norm = np.sum(np.exp([t.score for t in targets]))
for target in targets:
assert np.isclose(np.exp(target.score) / norm, target.proba,
atol=atol)
| mit |
xgds/xgds_plot | xgds_plot/staticPlot.py | 1 | 5302 | #!/usr/bin/env python
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import datetime
import pytz
import logging
import multiprocessing
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import matplotlib
# must set matplotlib mode before importing pylab to suppress errors
matplotlib.interactive(False)
matplotlib.use('agg')
from matplotlib import pyplot as plt
from geocamUtil.loader import getClassByName
from django.conf import settings
from xgds_plot import pylabUtil
from xgds_plot.meta import TIME_SERIES_LOOKUP
# shift time zone from UTC to desired time zone
TIME_OFFSET_DAYS = float(settings.XGDS_PLOT_TIME_OFFSET_HOURS) / 24
def epochMsToDateTime(epochMs):
epochSecs = float(epochMs) / 1000.0
return datetime.datetime.utcfromtimestamp(epochSecs)
def epochMsToMatPlotLib(epochMs):
return (matplotlib.dates.date2num(epochMsToDateTime(epochMs))
+ TIME_OFFSET_DAYS)
def writePlotData(out,
seriesId,
widthPix=None,
heightPix=None,
minTime=None,
maxTime=None):
if widthPix is None:
widthPix = 800
if heightPix is None:
heightPix = 120
xinch = float(widthPix) / 100
yinch = float(heightPix) / 100
fig = plt.figure()
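# look up the series metadata, build its query and value managers, and pull the records for the requested time range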
meta = TIME_SERIES_LOOKUP[seriesId]
queryClass = getClassByName(meta['queryType'])
queryManager = queryClass(meta)
valueClass = getClassByName(meta['valueType'])
valueManager = valueClass(meta, queryManager)
recs = queryManager.getData(minTime=minTime,
maxTime=maxTime)
timestamps = [epochMsToMatPlotLib(queryManager.getTimestamp(rec))
for rec in recs]
vals = [valueManager.getValue(rec)
for rec in recs]
plt.plot(timestamps, vals)
ax = fig.gca()
ax.grid(True)
xmin, xmax, ymin, ymax = ax.axis()
if minTime:
xmin = epochMsToMatPlotLib(minTime)
if maxTime:
xmax = epochMsToMatPlotLib(maxTime)
if ymin == 0 and ymax == 1:
# HACK styling special case
ymin = -0.1
ymax = 1.1
ax.axis([xmin, xmax, ymin, ymax])
pylabUtil.setXAxisDate()
plt.title(queryManager.getValueName(meta['valueField']))
logging.info('writePlotData: writing image')
plt.setp(fig, figwidth=xinch, figheight=yinch)
fig.savefig(out,
format='png',
bbox_inches='tight',
dpi=100,
pad_inches=0.05,
# transparent=True,
)
logging.info('writePlotData: releasing memory')
fig.clf()
plt.close(fig)
def savePlot(path,
seriesId,
widthPix=None,
heightPix=None,
minTime=None,
maxTime=None):
with open(path, 'wb') as out:
writePlotData(out,
seriesId,
widthPix,
heightPix,
minTime,
maxTime)
def getPlotData(seriesId,
widthPix=None,
heightPix=None,
minTime=None,
maxTime=None):
out = StringIO()
writePlotData(out,
seriesId,
widthPix,
heightPix,
minTime,
maxTime)
return out.getvalue()
def getPlotDataMultiprocessing(seriesId,
widthPix=None,
heightPix=None,
minTime=None,
maxTime=None):
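# run the plot rendering in a single-worker pool (a separate process) and return the PNG bytes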
pool = multiprocessing.Pool(processes=1)
imgData = pool.apply(getPlotData,
(seriesId,
widthPix,
heightPix,
minTime,
maxTime))
pool.close()
pool.join()
return imgData
def testStaticPlot():
now = datetime.datetime.now(pytz.utc)
ago = now - datetime.timedelta(days=3)
for f in ('airPressure', 'relativeHumidity'):
savePlot(path=f + '.png',
seriesId=f,
widthPix=None,
heightPix=None,
minTime=ago,
maxTime=now)
def main():
import optparse
parser = optparse.OptionParser('usage: %prog')
_opts, args = parser.parse_args()
if args:
parser.error('expected no args')
logging.basicConfig(level=logging.DEBUG)
testStaticPlot()
if __name__ == '__main__':
main()
| apache-2.0 |
Srisai85/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
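# record test and train error after each boosting stage via staged_predict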
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
IssamLaradji/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
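# l1_min_c returns the smallest C for which the L1-penalised model is not all zeros; sweep three decades above it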
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
LCAV/pyroomacoustics | examples/room_from_rt60.py | 1 | 2970 | """
This example creates a room with reverberation time specified by inverting Sabine's formula.
This results in a reverberation time slightly longer than desired.
The simulation uses the pure image source method.
The audio sample with the reverb added is saved back to `examples/samples/guitar_16k_reverb.wav`.
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile
import pyroomacoustics as pra
methods = ["ism", "hybrid"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Simulates and adds reverberation to a dry sound sample. Saves it into `./examples/samples`."
)
parser.add_argument(
"--method",
"-m",
choices=methods,
default=methods[0],
help="Simulation method to use",
)
args = parser.parse_args()
# The desired reverberation time and dimensions of the room
rt60_tgt = 0.3 # seconds
room_dim = [10, 7.5, 3.5] # meters
# import a mono wavfile as the source signal
# the sampling frequency should match that of the room
fs, audio = wavfile.read("examples/samples/guitar_16k.wav")
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60_tgt, room_dim)
# Create the room
if args.method == "ism":
room = pra.ShoeBox(
room_dim, fs=fs, materials=pra.Material(e_absorption), max_order=max_order
)
elif args.method == "hybrid":
room = pra.ShoeBox(
room_dim,
fs=fs,
materials=pra.Material(e_absorption),
max_order=3,
ray_tracing=True,
air_absorption=True,
)
# place the source in the room
room.add_source([2.5, 3.73, 1.76], signal=audio, delay=0.5)
# define the locations of the microphones
mic_locs = np.c_[
[6.3, 4.87, 1.2], [6.3, 4.93, 1.2], # mic 1 # mic 2
]
# finally place the array in the room
room.add_microphone_array(mic_locs)
# Run the simulation (this will also build the RIR automatically)
room.simulate()
room.mic_array.to_wav(
f"examples/samples/guitar_16k_reverb_{args.method}.wav",
norm=True,
bitdepth=np.int16,
)
# measure the reverberation time
rt60 = room.measure_rt60()
print("The desired RT60 was {}".format(rt60_tgt))
print("The measured RT60 is {}".format(rt60[1, 0]))
# Create a plot
plt.figure()
# plot one of the RIR. both can also be plotted using room.plot_rir()
rir_1_0 = room.rir[1][0]
plt.subplot(2, 1, 1)
plt.plot(np.arange(len(rir_1_0)) / room.fs, rir_1_0)
plt.title("The RIR from source 0 to mic 1")
plt.xlabel("Time [s]")
# plot signal at microphone 1
plt.subplot(2, 1, 2)
plt.plot(room.mic_array.signals[1, :])
plt.title("Microphone 1 signal")
plt.xlabel("Time [s]")
plt.tight_layout()
plt.show()
| mit |
operalib/operalib | operalib/kernels.py | 2 | 18637 | """
:mod:`operalib.kernels` implements some Operator-Valued Kernel
models.
"""
# Author: Romain Brault <romain.brault@telecom-paristech.fr> with help from
# the scikit-learn community.
# License: MIT
from numpy import dot, diag, sqrt
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.kernel_approximation import RBFSampler, SkewedChi2Sampler
from scipy.sparse.linalg import LinearOperator
from scipy.linalg import svd
class DotProductKernel(object):
r"""
Dot product Operator-Valued Kernel of the form:
.. math::
x, y \mapsto K(x, y) = \mu \langle x, y \rangle 1_p + (1-\mu) \langle
x, y \rangle^2 I_p
Attributes
----------
mu : {array, LinearOperator}, shape = [n_targets, n_targets]
Tradeoff between shared and independent components
p : {Int}
dimension of the targets (n_targets).
References
----------
See also
--------
DotProductKernelMap
Dot Product Kernel Map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DotProductKernel(mu=.2, p=5)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<500x500 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, mu, p):
"""Initialize the Dot product Operator-Valued Kernel.
Parameters
----------
mu : {float}
Tradeoff between shared and independent components.
p : {integer}
dimension of the targets (n_targets).
"""
self.mu = mu
self.p = p
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DotProductKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DotProductKernelMap
return DotProductKernelMap(X, self.mu, self.p)
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DotProductKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class DecomposableKernel(object):
r"""
Decomposable Operator-Valued Kernel of the form:
.. math::
X, Y \mapsto K(X, Y) = k_s(X, Y) A
where A is a symmetric positive semidefinite operator acting on the
outputs.
Attributes
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associates to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}
Additional parameters (keyword arguments) for kernel function passed as
callable object.
References
----------
See also
--------
DecomposableKernelMap
Decomposable Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DecomposableKernel(np.eye(2))
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, A, scalar_kernel=rbf_kernel, scalar_kernel_params=None):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associates to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}, optional
Additional parameters (keyword arguments) for kernel function
passed as callable object.
"""
self.A = A
self.scalar_kernel = scalar_kernel
self.scalar_kernel_params = scalar_kernel_params
self.p = A.shape[0]
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DecomposableKernelMap
return DecomposableKernelMap(X, self.A,
self.scalar_kernel,
self.scalar_kernel_params)
def get_orff_map(self, X, D=100, eps=1e-5, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
u, s, v = svd(self.A, full_matrices=False, compute_uv=True)
self.B_ = dot(diag(sqrt(s[s > eps])), v[s > eps, :])
self.r = self.B_.shape[0]
if (self.scalar_kernel is rbf_kernel) and not hasattr(self, 'Xb_'):
if self.scalar_kernel_params is None:
gamma = 1.
else:
gamma = self.scalar_kernel_params['gamma']
self.phi_ = RBFSampler(gamma=gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
elif (self.scalar_kernel == 'skewed_chi2') and not hasattr(self,
'Xb_'):
if self.scalar_kernel_params is None:
skew = 1.
else:
skew = self.scalar_kernel_params['skew']
self.phi_ = SkewedChi2Sampler(skewedness=skew,
n_components=D,
random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
elif not hasattr(self, 'Xb_'):
raise NotImplementedError('ORFF map for kernel is not '
'implemented yet')
D = self.phi_.n_components
if X is self.Xb_:
cshape = (D, self.r)
rshape = (self.Xb_.shape[0], self.p)
oshape = (self.Xb_.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(self.Xb_,
b.reshape(cshape)),
self.B_),
rmatvec=lambda r: dot(self.Xb_.T,
dot(r.reshape(rshape),
self.B_.T)))
else:
Xb = self.phi_.transform(X)
cshape = (D, self.r)
rshape = (X.shape[0], self.p)
oshape = (Xb.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(Xb,
b.reshape(cshape)),
self.B_),
rmatvec=lambda r: dot(Xb.T,
dot(r.reshape(rshape),
self.B_.T)))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
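# --- Illustration (not part of operalib) -------------------------------------
# For a decomposable kernel K(X, Y) = k_s(X, Y) A, the dense Gram operator can
# be written as a Kronecker product, consistent with the
# (n_samples1 * p) x (n_samples2 * p) operator shape in the docstring example.
# The helper below is a hypothetical sketch and assumes a sample-major
# vectorization of the outputs.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel as _rbf_kernel

def _dense_decomposable_gram(X, Y, A, scalar_kernel=_rbf_kernel):
    """Dense Gram of the decomposable OVK: kron(k_s(X, Y), A)."""
    return np.kron(scalar_kernel(X, Y), A)

# Example (illustrative):
#   G = _dense_decomposable_gram(np.random.randn(100, 10),
#                                np.random.randn(100, 10), np.eye(2))
#   G.shape == (200, 200)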
class RBFCurlFreeKernel(object):
r"""
Curl-free Operator-Valued Kernel of the form:
.. math::
X \mapsto K_X(Y) = 2 \gamma \exp(-\gamma||X - Y||^2)(I - 2\gamma(X - Y)
(X - Y)^T).
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFCurlFreeKernelMap
Curl-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFCurlFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
gamma : {float}
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : RBFCurlFreeKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFCurlFreeKernelMap
return RBFCurlFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
return LinearOperator(self.Xb_.shape,
matvec=lambda b: dot(self.Xb_, b),
rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
self.phi_.random_weights_.reshape((1, -1, Xb.shape[1])))
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : RBFCurlFreeKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
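# --- Illustration (not part of operalib) -------------------------------------
# Dense evaluation of the curl-free block for a single pair of points,
# following the docstring formula
# K(x, y) = 2*gamma*exp(-gamma*||x - y||^2) * (I - 2*gamma*(x - y)(x - y)^T).
# The helper name is hypothetical.
import numpy as np

def _rbf_curl_free_block(x, y, gamma):
    d = x - y
    return 2. * gamma * np.exp(-gamma * np.dot(d, d)) * (
        np.eye(x.size) - 2. * gamma * np.outer(d, d))

# Example (illustrative):
#   B = _rbf_curl_free_block(np.array([0., 1.]), np.array([1., 0.]), 1.)
#   B.shape == (2, 2)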
class RBFDivFreeKernel(object):
r"""
Divergence-free Operator-Valued Kernel of the form:
.. math::
X \mapsto K_X(Y) = exp(-\gamma||X-Y||^2)A_{X,Y},
where,
.. math::
A_{X,Y} = 2\gamma(X-Y)(X-Y)^T + ((d-1)-2\gamma||X-Y||^2) I.
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFDivFreeKernelMap
Divergence-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFDivFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
gamma : {float}
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : RBFDivFreeKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFDivFreeKernelMap
return RBFDivFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
return LinearOperator(self.Xb_.shape,
matvec=lambda b: dot(self.Xb_, b),
rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
# TODO:
# w = self.phi_.random_weights_.reshape((1, -1, Xb.shape[1]))
# wn = np.linalg.norm(w)
# Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
# wn * np.eye()w np.dot(w.T, w) / wn)
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : RBFDivFreeKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
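# --- Illustration (not part of operalib) -------------------------------------
# Dense evaluation of the divergence-free block for a single pair of points,
# following the docstring formula
# K(x, y) = exp(-gamma*||x - y||^2) *
#           (2*gamma*(x - y)(x - y)^T + ((d - 1) - 2*gamma*||x - y||^2) I).
# The helper name is hypothetical.
import numpy as np

def _rbf_div_free_block(x, y, gamma):
    diff = x - y
    sqn = np.dot(diff, diff)
    d = x.size
    return np.exp(-gamma * sqn) * (2. * gamma * np.outer(diff, diff) +
                                   ((d - 1.) - 2. * gamma * sqn) * np.eye(d))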
| bsd-3-clause |
joansmith/openmicroscopy | components/tools/OmeroPy/src/omero/install/perf_test.py | 11 | 11850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Add: screen/plate
# Add: plotting
#
import re
import os
import sys
import path
import time
import omero
import logging
import omero.cli
import omero.util
import omero.util.temp_files
import uuid
command_pattern = "^\s*(\w+)(\((.*)\))?(:(.*))?$"
command_pattern_compiled = re.compile(command_pattern)
log = logging.getLogger("omero.perf")
FILE_FORMAT = """
File format:
<blank> Ignored
# comment Ignored
ServerTime(repeat=100) Retrieve the server time 100 \
times
Import:<file> Import given file
Import(Screen:<id>):<file> Import given file into screen
Import(Dataset:<id>):<file> Import given file into \
project/dataset
Import(Project:<id>,Dataset:<id>):<file> Import given file into \
project/dataset
Import(Dataset:some name):<file> Import given file into a new \
dataset
Import(Dataset):<file> Import given file into last \
created dataset (or create a new one)
#
# "Import" is the name of a command available in the current context
# Use the "--list" command to print them all. All lines must be of the
# form: %s
""" % command_pattern
#
# Main classes
#
class ItemException(Exception):
pass
class BadCommand(ItemException):
pass
class BadLine(ItemException):
pass
class BadPath(ItemException):
pass
class BadImport(ItemException):
pass
class Item(object):
"""
Single line-item in the configuration file
"""
def __init__(self, line):
self.line = line.strip()
if not self.comment():
match = command_pattern_compiled.match(self.line)
if not match:
raise BadLine("Unexpected line: %s" % line)
self.command = match.group(1)
self.arguments = match.group(3)
self.path = match.group(5)
self.props = dict()
if self.arguments:
args = self.arguments.split(",")
for arg in args:
parts = arg.split("=", 1)
value = (len(parts) == 2 and parts[1] or "")
self.props[parts[0]] = value
log.debug("Found line: %s, %s, %s, %s", self.command,
self.arguments, self.path, self.props)
def repeat(self):
return int(self.props.get("repeat", "1"))
def comment(self):
if len(self.line) == 0:
return True
elif self.line.startswith("#"):
return True
def execute(self, ctx):
if self.comment():
return
m = getattr(self, "_op_%s" % self.command, None)
if m is None:
raise BadCommand("Unknown command: %s" % self.command)
m(ctx)
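# Illustration (not part of the original module): ``execute`` dispatches the
# parsed command name to a matching ``_op_<Command>`` method via getattr, so a
# hypothetical config line "ServerTime(repeat=100)" ends up calling
# ``_op_ServerTime`` and is repeated 100 times by the handler:
#
#   >>> item = Item("ServerTime(repeat=100)")
#   >>> item.command, item.repeat()
#   ('ServerTime', 100)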
def create_obj(self, ctx, name):
id = None
id_path = ctx.dir / ("%s.id" % name)
prop = self.props.get(name)
# Do nothing if not in props
if prop is None:
return None
# If an integer, use that as an id
try:
id = int(prop)
log.debug("Using specified %s:%s" % (name, id))
except:
# Otherwise, create/re-use
if prop == "":
try:
id = int(id_path.lines()[0])
except Exception, e:
log.debug("No %s.id: %s", name, e)
prop = str(uuid.uuid4())
# Now, if there's still no id, create one
if id is not None:
log.debug("Using reload %s:%s" % (name, id))
else:
kls = getattr(omero.model, "%sI" % name)
obj = kls()
obj.name = omero.rtypes.rstring(prop)
obj = ctx.update_service().saveAndReturnObject(obj)
id = obj.id.val
log.debug("Created obj %s:%s" % (name, id))
id_path.write_text(str(id))
return id
def create_link(self, ctx, kls_name, parent, child):
link = ctx.query_service().findByQuery(
"select link from %s link where link.parent.id = %s and"
" link.child.id = %s" % (kls_name, parent.id.val, child.id.val),
None)
if link:
log.debug("Found link %s:%s" % (kls_name, link.id.val))
else:
kls = getattr(omero.model, "%sI" % kls_name)
obj = kls()
obj.parent = parent
obj.child = child
obj = ctx.update_service().saveAndReturnObject(obj)
log.debug("Created link %s:%s" % (kls_name, obj.id.val))
def _op_Import(self, ctx):
p = path.path(self.path)
if not p.exists():
raise BadPath("File does not exist: %s" % self.path)
f = str(p.abspath())
out = ctx.dir / ("import_%s.out" % ctx.count)
err = ctx.dir / ("import_%s.err" % ctx.count)
args = ["import", "---file=%s" % str(out), "---errs=%s" % str(err),
"-s", ctx.host(), "-k", ctx.key(), f]
s_id = self.create_obj(ctx, "Screen")
if s_id:
args.extend(["-r", str(s_id)])
p_id = self.create_obj(ctx, "Project")
d_id = self.create_obj(ctx, "Dataset")
if p_id and d_id:
self.create_link(
ctx, "ProjectDatasetLink", omero.model.ProjectI(p_id, False),
omero.model.DatasetI(d_id, False))
if d_id:
args.extend(["-d", str(d_id)])
ctx.cli.invoke(args)
if ctx.cli.rv != 0:
raise BadImport("Failed import: rv=%s" % ctx.cli.rv)
num_pix = len(out.lines())
log.debug("Import count: %s", num_pix)
def _op_ServerTime(self, ctx):
ctx.config_service().getServerTime()
def _op_LoadFormats(self, ctx):
ctx.query_service().findAll("Format", None)
class Context(object):
"""
Login context which can be used by any handler
for connecting to a single session.
"""
def __init__(self, id, reporter=None, client=None):
self.reporters = []
self.count = 0
self.id = id
if client is None:
self.client = omero.client(id)
self.client.setAgent("OMERO.perf_test")
self.client.createSession()
else:
self.client = client
self.services = {}
self.cli = omero.cli.CLI()
self.cli.loadplugins()
self.setup_dir()
log.debug("Running performance tests in %s", self.dir)
def add_reporter(self, reporter):
self.reporters.append(reporter)
def setup_dir(self):
self.dir = path.path(".") / ("perfdir-%s" % os.getpid())
if self.dir.exists():
raise Exception("%s exists!" % self.dir)
self.dir.makedirs()
# Adding a file logger
handler = logging.handlers.RotatingFileHandler(
str(self.dir / "perf.log"), maxBytes=10000000, backupCount=5)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(omero.util.LOGFORMAT)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
# log.addHandler(handler)
log.debug("Started: %s", time.ctime())
def incr(self):
self.count += 1
def host(self):
return self.client.getProperty("omero.host")
def key(self):
return self.client.sf.ice_getIdentity().name
def report(self, command, start, stop, loops, rv):
for reporter in self.reporters:
reporter.report(command, start, stop, loops, rv)
def _stateless(self, name, prx):
svc = self.services.get(name)
if svc:
return svc
else:
svc = self.client.sf.getByName(name)
svc = prx.uncheckedCast(svc)
self.services[name] = svc
return svc
def query_service(self):
return self._stateless(omero.constants.QUERYSERVICE,
omero.api.IQueryPrx)
def config_service(self):
return self._stateless(omero.constants.CONFIGSERVICE,
omero.api.IConfigPrx)
def update_service(self):
return self._stateless(omero.constants.UPDATESERVICE,
omero.api.IUpdatePrx)
class PerfHandler(object):
def __init__(self, ctx=None):
self.ctx = ctx
def __call__(self, line):
(self.ctx.dir/"line.log").write_text(line, append=True)
item = Item(line)
if item.comment():
return
values = {}
total = 0.0
self.ctx.incr()
start = time.time()
loops = item.repeat()
for i in range(loops):
try:
item.execute(self.ctx)
except ItemException:
log.exception("Error")
sys.exit(1)
except Exception:
log.debug("Error during execution: %s" % item.line.strip(),
exc_info=True)
errs = values.get("errs", 0)
errs += 1
values["errs"] = errs
if loops > 1:
values["avg"] = total / loops
stop = time.time()
total += (stop - start)
self.ctx.report(item.command, start, stop, loops, values)
#
# Reporter hierarchy
#
class Reporter(object):
"""
Abstract base class of all reporters
"""
def report(self, command, start, stop, loops, rv):
raise Exception("Not implemented")
class CsvReporter(Reporter):
def __init__(self, dir=None):
if dir is None:
self.stream = sys.stdout
else:
self.file = str(dir / "report.csv")
self.stream = open(self.file, "w")
print >>self.stream, "Command,Start,Stop,Elapsed,Average,Values"
def report(self, command, start, stop, loops, values):
print >>self.stream, "%s,%s,%s,%s,%s,%s" % (
command, start, stop, (stop-start), (stop-start)/loops, values)
self.stream.flush()
class HdfReporter(Reporter):
def __init__(self, dir):
import tables
self.file = str(dir / "report.hdf")
self.hdf = tables.openFile(self.file, "w")
self.tbl = self.hdf.createTable("/", "report", {
"Command": tables.StringCol(pos=0, itemsize=64),
"Start": tables.Float64Col(pos=1),
"Stop": tables.Float64Col(pos=2),
"Elapsed": tables.Float64Col(pos=3),
"Average": tables.Float64Col(pos=4),
"Values": tables.StringCol(pos=5, itemsize=1000)
})
self.row = self.tbl.row
def report(self, command, start, stop, loops, values):
self.row["Command"] = command
self.row["Start"] = start
self.row["Stop"] = stop
self.row["Elapsed"] = (stop-start)
self.row["Average"] = (stop-start)/loops
self.row["Values"] = values
self.row.append()
self.hdf.flush()
class PlotReporter(Reporter):
def __init__(self):
return
# import matplotlib.pyplot as plt
# self.fig = plt.figure()
# self.ax = fig.add_subplot(111)
def report(self, command, start, stop, loops, values):
return
# ax.set_ylim(-2,25)
# ax.set_xlim(0, (last-first))
# plt.show()
########################################################
#
# Functions for the execution of this module
#
def handle(handler, files):
"""
Primary method used by the command-line execution of
this module.
"""
log.debug("Running perf on files: %s", files)
for file in files:
for line in file:
handler(line)
log.debug("Handled %s lines" % handler.ctx.count)
| gpl-2.0 |
altairpearl/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
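# Illustrative note (not part of the original tests): the triple loop above is
# the textbook Floyd-Warshall relaxation. On a tiny 3-node example (numbers
# chosen for this sketch), routing through node 1 shortens the 0 -> 2 path:
#
#   >>> g = np.array([[0., 1., 5.],
#   ...               [1., 0., 1.],
#   ...               [5., 1., 0.]])
#   >>> floyd_warshall_slow(g.copy())[0, 2]
#   2.0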
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/signal/spectral.py | 7 | 66506 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of, weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
is unnormalized; it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
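# Illustrative sketch (not part of scipy): a quick check of the documented
# unnormalized peak value ``(A**2) * N/4`` for a harmonic signal. The numbers
# below are assumptions chosen for the sketch.
#
#   >>> A, N = 2.0, 1000
#   >>> t = np.linspace(0.0, 100.0, N)
#   >>> y = A * np.sin(2 * np.pi * 0.5 * t)
#   >>> pgram = lombscargle(t, y, np.array([2 * np.pi * 0.5]))
#   >>> # pgram[0] should be close to (A**2) * N / 4 = 1000.0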
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
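# Illustrative note (not part of scipy): as the final call above shows, the
# periodogram is Welch's method applied to a single, non-overlapping segment,
# so with matching settings the two estimators agree:
#
#   >>> xs = np.random.randn(1024)
#   >>> f1, P1 = periodogram(xs)
#   >>> f2, P2 = welch(xs, window='boxcar', nperseg=len(xs), noverlap=0)
#   >>> bool(np.allclose(P1, P2))
#   True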
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
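# Illustrative note (not part of scipy): the conjugation convention stated in
# the csd docstring, written out for a single boxcar-windowed segment
# (scaling factors omitted; the setup values are assumptions for the sketch):
#
#   >>> xs, ys = np.random.randn(64), np.random.randn(64)
#   >>> Pxy_seg = np.conjugate(np.fft.rfft(xs)) * np.fft.rfft(ys)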
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = np.sum((win[ii*step:(ii+1)*step] for ii in range(nperseg//step)),
axis=0)
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
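# Illustrative sketch (not part of scipy): the COLA test above only checks
# that the overlapped window segments sum to a (near) constant. A manual
# version for a periodic Hann window at 50% overlap (nperseg=8, noverlap=4):
#
#   >>> win = get_window('hann', 8)                 # periodic / DFT-even Hann
#   >>> step = 8 - 4                                # nperseg - noverlap
#   >>> binsums = win[:step] + win[step:2 * step]
#   >>> bool(np.max(np.abs(binsums - np.median(binsums))) < 1e-10)
#   True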
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
`numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
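# e.g. a one-sided STFT with 129 frequency rows implies an (assumed even)
# segment length of 2*(129 - 1) = 256 samples.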
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
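# Illustrative check (not from the original source): the default Hann window
# with 50% overlap satisfies COLA, e.g. check_COLA('hann', 512, 256) returns
# True, so istft can invert an unmodified stft computed with those settings.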
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
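# Invert each segment's FFT and keep only the first nperseg samples; any
# zero-padding applied when the STFT was computed with nfft > nperseg is
# discarded here.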
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
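# e.g. 7 segments of 256 samples with a hop of 128 reconstruct
# 256 + 6*128 = 1024 output samples.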
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
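# The accumulated sum of win**2 is the least-squares weighting of Griffin &
# Lim; dividing it out undoes the analysis and synthesis windowing wherever
# the window energy is non-negligible.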
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode: str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving
average filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
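# e.g. a 1000-sample signal with nperseg=256 and nstep=128 needs
# nadd = (-(1000-256) % 128) % 256 = 24 trailing zeros, giving
# 1024 = 256 + 6*128 samples, i.e. exactly 7 full segments.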
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
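# 'density' divides by fs * sum(win**2), yielding a power spectral density in
# V**2/Hz; 'spectrum' divides by sum(win)**2 (the squared coherent gain),
# yielding a power spectrum in V**2. For mode='stft' the square root is taken
# because the un-squared, complex spectrum is returned rather than its
# squared magnitude.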
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
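# Doubling folds the energy of the discarded negative frequencies into the
# one-sided estimate; DC (and Nyquist, when present) have no mirror image and
# are left untouched.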
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating
data?", http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
# Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
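# e.g. a 1024-sample signal with nperseg=256 and noverlap=128 gives step=128
# and a strided view of shape (..., 7, 256): seven overlapping segments that
# share memory with x, so no copy is made.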
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
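# sides == 'onesided' is only selected for real input, so the imaginary part
# dropped here is already zero; rfft then returns just the non-negative
# frequencies.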
func = np.fft.rfft
result = func(result, n=nfft)
return result
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
window. If function was called with string or tuple then this will hold
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
'= {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
| mit |
shogun-toolbox/shogun | examples/undocumented/python/graphical/classifier_perceptron_graphical.py | 2 | 2321 | import matplotlib.pyplot as plt
import numpy as np
parameter_list = [[20, 5, 1, 1000, 1, None, 5], [100, 5, 1, 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1, max_iter=1000, num_threads=1, seed=None,
nperceptrons=5):
import shogun as sg
# 2D data
_DIM = 2
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM, n)) + distance
Y = np.array(np.random.randn(_DIM, n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X, Y))
feats_train = sg.create_features(fm_train_real)
labels = sg.create_labels(label_train_twoclass)
perceptron = sg.create_machine('Perceptron', labels=labels, learn_rate=learn_rate, max_iterations=max_iter,
initialize_hyperplane=False)
# Find limits for visualization
x_min = min(np.min(X[0, :]), np.min(Y[0, :]))
x_max = max(np.max(X[0, :]), np.max(Y[0, :]))
y_min = min(np.min(X[1, :]), np.min(Y[1, :]))
y_max = max(np.max(X[1, :]), np.max(Y[1, :]))
for i in range(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.put('w', np.random.random(2))
perceptron.put('bias', np.random.random())
# Run the perceptron algorithm
perceptron.train(feats_train)
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get('bias')
w = perceptron.get('w')
hx = np.linspace(x_min - 1, x_max + 1)
hy = -(w[0] * hx + b) / w[1]  # solve w[0]*x + w[1]*y + b = 0 for y
plt.plot(hx, hy)
# Plot the two-class data
plt.scatter(X[0, :], X[1, :], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0, :], Y[1, :], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min - 1, x_max + 1, y_min - 1, y_max + 1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__ == '__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/frame/test_mutate_columns.py | 7 | 7831 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_index_equal(df.columns,
Index(['foo', 'c', 'bar', 'b', 'a']))
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
# GH 13522
df = DataFrame(index=['A', 'B', 'C'])
df['X'] = df.index
df['X'] = ['x', 'y', 'z']
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
# 10912
# inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
'A', 'B', 'C'], index=['X', 'Y'])
b = a.pop('B')
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=[
'A', 'C'], index=['X', 'Y'])
assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
columns=['c', 'a', 'b'])
assert_frame_equal(result, expected)
| mit |
ClusterWhisperer/clusterstats | tests/test_http.py | 1 | 8802 | """Testcases for the clusterstats module."""
import unittest
import json
from pprint import pprint
import httpretty
from requests import HTTPError, Timeout
import pandas as pd
from clusterstats import http
from clusterstats import stats
class ClusterStatsTest(unittest.TestCase):
def test_read_servers(self):
"""Test the _read_servers private method"""
self.assertEquals(len(http._read_servers("data/servers.txt")), 1000)
def test_transform_hostname_to_http_endpoint(self):
"""Test the _transform_hostname_to_http_endpoint"""
hosts=["server1", "server2"]
expected_out=["http://server1/status", "http://server2/status"]
self.assertEquals(http._transform_hostname_to_http_endpoint(hosts), expected_out)
@httpretty.activate
def test_http_OK(self):
"""Test Http Connectivity - Success scenario"""
url='http://myserver/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=200,
body=content,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(result[0], 0)
self.assertTrue(result[1]["Application"] == "Webapp2")
@httpretty.activate
def test_http_404(self):
"""Test HTTP Error Condition"""
def exception_callback(request, uri, headers):
raise HTTPError("404 Page Not found.")
url='http://myserver/status'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=404,
body=exception_callback,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_http_timeout(self):
"""Test Timeout Condition"""
def exception_callback(request, uri, headers):
raise Timeout("Connection Timeout.")
url='http://myserver/status'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=504,
body=exception_callback,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_json_error(self):
"""Test ValueError if JSON not returned."""
url='http://myserver/status'
content="Hello World"
httpretty.register_uri(
method=httpretty.GET,
uri=url,
status=200,
body=content,
content_type="application/json"
)
result=self._connect(url)
self.assertEquals(-1,result[0])
@httpretty.activate
def test_http_retries(self):
"""Test HTTP Session connection retries """
def exception_callback(request, uri, headers):
raise Timeout("Connection Timeout. - 2")
url='http://myserver/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url,
responses=[
httpretty.Response(body=exception_callback, status=504),
httpretty.Response(body=exception_callback, status=504),
httpretty.Response(body=content, status=200, content_type="application/json"),
])
result=self._connect(url)
self.assertEquals(result[0], 0)
self.assertTrue(result[1]["Application"] == "Webapp2")
@httpretty.activate
def test_query_status(self):
""" Test query_status """
url1='http://myserver1/status'
url2='http://myserver2/status'
url3='http://myserver3/status'
url4='http://myserver4/status'
url5='http://myserver5/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
bad_content="Hello World..."
httpretty.register_uri(
method=httpretty.GET,
uri=url1,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url2,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url3,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url4,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url5,
status=200,
body=bad_content,
content_type="application/json"
)
results=http.query_status([url1, url2, url3, url4, url5], 3, 2, 3)
##expecting success count = 4 and failure = 1
success_list=filter(lambda x: x[0] == 0, results)
failure_list=filter(lambda x: x[0] == -1, results)
self.assertTrue(len(success_list) == 4)
self.assertTrue(len(failure_list) == 1)
def test_calc_qos(self):
"""Test Calculating QoS """
self.assertTrue(stats.calc_qos(100,99), 99.0)
def test_check_qos(self):
"""Test Check QoS method """
self.assertTrue(stats.check_qos(99.0, 100, 99))
self.assertFalse(stats.check_qos(99.1, 100, 99))
def test_calc_stats(self):
"""Testing Stats Calculation"""
d = [{"Application":"Webapp1","Version":"1.2.1","Uptime":9634484391,"Request_Count":7729359104,
"Error_Count":3394574268,"Success_Count":4334784836},
{"Application":"Webapp1","Version":"1.2.1","Uptime":9634484391,"Request_Count":7729359104,
"Error_Count":3394574268,"Success_Count":4334784836},
{"Application":"Database2","Version":"0.1.0","Uptime":8982039907,"Request_Count":2174448763,
"Error_Count":2001963223,"Success_Count":172485540}]
df = stats.calc_stats(d, ['Application', 'Version'], 'Success_Count', stats.OPERATOR_ADD)
self.assertTrue(df.shape[0], 2) ## expecting two rows.
@httpretty.activate
def test_success_flow(self):
"""Integration test of the http results and stats calculation."""
url1='http://myserver1/status'
url2='http://myserver2/status'
url3='http://myserver3/status'
content=('{"Application":"Webapp2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":4134752620,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
content2=('{"Application":"Database2","Version":"0.0.2",'
'"Uptime":8102471691,"Request_Count":172485540,'
'"Error_Count":2772072365,"Success_Count":1362680255}')
httpretty.register_uri(
method=httpretty.GET,
uri=url1,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url2,
status=200,
body=content,
content_type="application/json"
)
httpretty.register_uri(
method=httpretty.GET,
uri=url3,
status=200,
body=content2,
content_type="application/json"
)
results=http.query_status([url1, url2, url3], 3, 2, 3)
success_list=filter(lambda x: x[0] == 0, results)
failure_list=filter(lambda x: x[0] == -1, results) # expect no failure
self.assertEquals(failure_list, [])
data = [msg for (status, msg) in results]
df = stats.calc_stats(data, [stats.FIELD_APPLICATION, stats.FIELD_VERSION],
stats.FIELD_SUCCESS_COUNT, stats.OPERATOR_ADD)
self.assertTrue(df.shape[0], 2)
pprint(df)
def _connect(self, url):
result=http._get_server_status(url,10,3)
# print result
return result
if __name__ == '__main__':
unittest.main() | mit |
rhyolight/nupic.research | projects/l2_pooling/capacity_test.py | 4 | 61224 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests the capacity of L4-L2 columns
In this test, we consider a set of objects without any shared (feature,
location) pairs and without any noise. One or more L4-L2 columns are trained
on all objects.
In the test phase, we randomly pick a (feature, location) SDR and feed it to
the network, and ask whether the correct object can be retrieved.
"""
import argparse
import multiprocessing
import os
import os.path
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
DEFAULT_NUM_LOCATIONS = 5000
DEFAULT_NUM_FEATURES = 5000
DEFAULT_RESULT_DIR_NAME = "results"
DEFAULT_PLOT_DIR_NAME = "plots"
DEFAULT_NUM_CORTICAL_COLUMNS = 1
DEFAULT_COLORS = ("b", "r", "c", "g", 'm', 'y', 'w', 'k')
def _prepareResultsDir(resultBaseName, resultDirName=DEFAULT_RESULT_DIR_NAME):
"""
Ensures that the requested resultDirName exists. Attempt to create it if not.
Returns the combined absolute path to result.
"""
resultDirName = os.path.abspath(resultDirName)
resultFileName = os.path.join(resultDirName, resultBaseName)
try:
if not os.path.isdir(resultDirName):
# Directory does not exist, attempt to create recursively
os.makedirs(resultDirName)
except os.error:
# Unlikely, but directory may have been created already. Double check to
# make sure it's safe to ignore error in creation
if not os.path.isdir(resultDirName):
raise Exception("Unable to create results directory at {}"
.format(resultDirName))
return resultFileName
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 150,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": 10,
"basalPredictedSegmentDecrement": 0.0,
"activationThreshold": 13,
"sampleSize": 25,
"implementation": "ApicalTiebreakCPP",
}
def getL2Params():
"""
Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 2048 * 8,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 6,
"sampleSizeProximal": 10,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 18,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"learningMode": True,
}
def createRandomObjects(numObjects,
numPointsPerObject,
numLocations,
numFeatures):
"""
Create numObjects with non-shared (feature, location) pairs
:param numObjects: number of objects
:param numPointsPerObject: number of (feature, location) pairs per object
:param numLocations: number of unique locations
:param numFeatures: number of unique features
:return: (list(list(tuple))) List of lists of feature / location pairs.
"""
requiredFeatureLocPairs = numObjects * numPointsPerObject
uniqueFeatureLocPairs = numLocations * numFeatures
if requiredFeatureLocPairs > uniqueFeatureLocPairs:
raise RuntimeError("Not Enough Feature Location Pairs")
randomPairIdx = np.random.choice(
np.arange(uniqueFeatureLocPairs),
numObjects * numPointsPerObject,
replace=False
)
randomFeatureLocPairs = (divmod(idx, numFeatures) for idx in randomPairIdx)
# Return sequences of random feature-location pairs. Each sequence will
# contain a number of pairs defined by 'numPointsPerObject'
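# (zip(*[iter(seq)] * n) is the standard grouper idiom: it pulls n items at a
# time from the same iterator, so each object receives numPointsPerObject
# distinct pairs.)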
return zip(*[iter(randomFeatureLocPairs)] * numPointsPerObject)
def createRandomObjectsSharedPairs(numObjects,
numPointsPerObject,
numLocations,
numFeatures):
"""
Create numObjects. (feature, location) pairs may be shared.
:param numObjects: number of objects
:param numPointsPerObject: number of (feature, location) pairs per object
:param numLocations: number of unique locations
:param numFeatures: number of unique features
:return: (list(list(tuple))) List of lists of feature / location pairs.
"""
locations = np.arange(numLocations)
features = np.arange(numFeatures)
objects = []
objectsSets = set()
for _ in xrange(numObjects):
objectLocations = np.random.choice(locations, numPointsPerObject,
replace=False)
objectFeatures = np.random.choice(features, numPointsPerObject,
replace=True)
o = zip(objectLocations, objectFeatures)
# Make sure this is a unique object.
objectAsSet = frozenset(o)
assert objectAsSet not in objectsSets
objectsSets.add(objectAsSet)
objects.append(o)
return objects
def testNetworkWithOneObject(objects, exp, testObject, numTestPoints):
"""
Check whether a trained L4-L2 network can successfully retrieve an object
based on a sequence of (feature, location) pairs on this object
:param objects: list of lists of (feature, location) pairs for all objects
:param exp: L4L2Experiment instance with a trained network
:param testObject: the index for the object being tested
:param numTestPoints: number of test points on the test object
:return:
"""
innerObjs = objects.getObjects()
numObjects = len(innerObjs)
numPointsPerObject = len(innerObjs[0])
testPts = np.random.choice(np.arange(numPointsPerObject),
(numTestPoints,),
replace=False)
testPairs = [objects[testObject][i] for i in testPts]
exp._unsetLearningMode()
exp.sendReset()
overlap = np.zeros((numTestPoints, numObjects))
numL2ActiveCells = np.zeros((numTestPoints))
numL4ActiveCells = np.zeros((numTestPoints))
for step, pair in enumerate(testPairs):
(locationIdx, featureIdx) = pair
for colIdx in xrange(exp.numColumns):
feature = objects.features[colIdx][featureIdx]
location = objects.locations[colIdx][locationIdx]
exp.sensorInputs[colIdx].addDataToQueue(list(feature), 0, 0)
exp.externalInputs[colIdx].addDataToQueue(list(location), 0, 0)
exp.network.run(1)
for colIdx in xrange(exp.numColumns):
numL2ActiveCells[step] += float(len(exp.getL2Representations()[colIdx]))
numL4ActiveCells[step] += float(len(exp.getL4Representations()[colIdx]))
numL2ActiveCells[step] /= exp.numColumns
numL4ActiveCells[step] /= exp.numColumns
overlapByColumn = exp.getCurrentObjectOverlaps()
overlap[step] = np.mean(overlapByColumn, axis=0)
# columnPooler = exp.L2Columns[0]._pooler
# tm = exp.L4Columns[0]._tm
# print "step : {}".format(step)
# print "{} L4 cells predicted : ".format(
# len(exp.getL4PredictedCells()[0])), exp.getL4PredictedCells()
# print "{} L4 cells active : ".format(len(exp.getL4Representations()[0])), exp.getL4Representations()
# print "L2 activation: ", columnPooler.getActiveCells()
# print "overlap : ", (overlap[step, :])
return overlap, numL2ActiveCells, numL4ActiveCells
def testOnSingleRandomSDR(objects, exp, numRepeats=100, repeatID=0):
"""
Test a trained L4-L2 network on (feature, location) pairs multiple times
Compute object retrieval accuracy, overlap with the correct and incorrect
objects
:param objects: list of lists of (feature, location) pairs for all objects
:param exp: L4L2Experiment instance with a trained network
:param numRepeats: number of repeats
:return: a set of metrics for retrieval accuracy
"""
innerObjs = objects.getObjects()
numObjects = len(innerObjs)
numPointsPerObject = len(innerObjs[0])
overlapTrueObj = np.zeros((numRepeats,))
l2ActivationSize = np.zeros((numRepeats,))
l4ActivationSize = np.zeros((numRepeats,))
confusion = overlapTrueObj.copy()
outcome = overlapTrueObj.copy()
columnPooler = exp.L2Columns[0]._pooler
numConnectedProximal = columnPooler.numberOfConnectedProximalSynapses()
numConnectedDistal = columnPooler.numberOfConnectedDistalSynapses()
result = None
for i in xrange(numRepeats):
targetObject = np.random.choice(np.arange(numObjects))
nonTargetObjs = np.array(
[obj for obj in xrange(numObjects) if obj != targetObject]
)
overlap, numActiveL2Cells, numL4ActiveCells = testNetworkWithOneObject(
objects,
exp,
targetObject,
10
)
lastOverlap = overlap[-1, :]
maxOverlapIndices = (
np.where(lastOverlap == lastOverlap[np.argmax(lastOverlap)])[0].tolist()
)
# Only set to 1 iff target object is the lone max overlap index. Otherwise
# the network failed to conclusively identify the target object.
outcome[i] = 1 if maxOverlapIndices == [targetObject] else 0
confusion[i] = np.max(lastOverlap[nonTargetObjs])
overlapTrueObj[i] = lastOverlap[targetObject]
l2ActivationSize[i] = numActiveL2Cells[-1]
l4ActivationSize[i] = numL4ActiveCells[-1]
testResult = {
"repeatID": repeatID,
"repeatI": i,
"numberOfConnectedProximalSynapses": numConnectedProximal,
"numberOfConnectedDistalSynapses": numConnectedDistal,
"numObjects": numObjects,
"numPointsPerObject": numPointsPerObject,
"l2ActivationSize": l2ActivationSize[i],
"l4ActivationSize": l4ActivationSize[i],
"confusion": confusion[i],
"accuracy": outcome[i],
"overlapTrueObj": overlapTrueObj[i]}
result = (
pd.concat([result, pd.DataFrame.from_dict([testResult])])
if result is not None else
pd.DataFrame.from_dict([testResult])
)
return result
def plotResults(result, ax=None, xaxis="numObjects",
filename=None, marker='-bo', confuseThresh=30, showErrBar=1):
if ax is None:
fig, ax = plt.subplots(2, 2)
numRpts = max(result['repeatID']) + 1
resultsRpts = result.groupby(['repeatID'])
if xaxis == "numPointsPerObject":
x = np.array(resultsRpts.get_group(0).numPointsPerObject)
xlabel = "# Pts / Obj"
x = np.unique(x)
d = resultsRpts.get_group(0)
d = d.groupby(['numPointsPerObject'])
elif xaxis == "numObjects":
x = np.array(resultsRpts.get_group(0).numObjects)
xlabel = "Object #"
x = np.unique(x)
d = resultsRpts.get_group(0)
d = d.groupby(['numObjects'])
accuracy = np.zeros((1, len(x),))
numberOfConnectedProximalSynapses = np.zeros((1, len(x),))
l2ActivationSize = np.zeros((1, len(x),))
confusion = np.zeros((1, len(x),))
for j in range(len(x)):
accuracy[0,j] = np.sum(np.logical_and(d.get_group(x[j]).accuracy == 1,
d.get_group(x[j]).confusion < confuseThresh)) / \
float(len(d.get_group(x[j]).accuracy))
l2ActivationSize[0,j] = np.mean(d.get_group(x[j]).l2ActivationSize)
confusion[0,j] = np.mean(d.get_group(x[j]).confusion)
numberOfConnectedProximalSynapses[0,j] = np.mean(
d.get_group(x[j]).numberOfConnectedProximalSynapses)
if ax is None:
fig, ax = plt.subplots(2, 2)
for rpt in range(1, numRpts):
d = resultsRpts.get_group(rpt)
d = d.groupby(['numObjects'])
accuracyRpt = np.zeros((1, len(x)))
numberOfConnectedProximalSynapsesRpt = np.zeros((1, len(x)))
l2ActivationSizeRpt = np.zeros((1, len(x)))
confusionRpt = np.zeros((1, len(x)))
for j in range(len(x)):
accuracyRpt[0,j] = np.sum(np.logical_and(
d.get_group(x[j]).accuracy == 1,
d.get_group(x[j]).confusion < confuseThresh)) / \
float(len(d.get_group(x[j]).accuracy))
l2ActivationSizeRpt[0,j] = np.mean(d.get_group(x[j]).l2ActivationSize)
confusionRpt[0,j] = np.mean(d.get_group(x[j]).confusion)
numberOfConnectedProximalSynapsesRpt[0,j] = np.mean(
d.get_group(x[j]).numberOfConnectedProximalSynapses)
accuracy = np.vstack((accuracy, accuracyRpt))
confusion = np.vstack((confusion, confusionRpt))
l2ActivationSize = np.vstack((l2ActivationSize, l2ActivationSizeRpt))
numberOfConnectedProximalSynapses = np.vstack((
numberOfConnectedProximalSynapses, numberOfConnectedProximalSynapsesRpt))
if showErrBar==0:
s = 0
else:
s = 1
ax[0, 0].errorbar(x, np.mean(accuracy, 0), yerr=np.std(accuracy, 0)*s,
color=marker)
ax[0, 0].set_ylabel("Accuracy")
ax[0, 0].set_xlabel(xlabel)
ax[0, 0].set_ylim([0.1, 1.05])
ax[0, 0].set_xlim([0, 820])
ax[0, 1].errorbar(x, np.mean(numberOfConnectedProximalSynapses, 0),
yerr=np.std(numberOfConnectedProximalSynapses, 0)*s, color=marker)
ax[0, 1].set_ylabel("# connected proximal synapses")
ax[0, 1].set_xlabel("# Pts / Obj")
ax[0, 1].set_xlabel(xlabel)
ax[0, 1].set_xlim([0, 820])
ax[1, 0].errorbar(x, np.mean(l2ActivationSize, 0), yerr=np.std(l2ActivationSize, 0)*s, color=marker)
ax[1, 0].set_ylabel("l2ActivationSize")
# ax[1, 0].set_ylim([0, 41])
ax[1, 0].set_xlabel(xlabel)
ax[1, 0].set_xlim([0, 820])
ax[1, 1].errorbar(x, np.mean(confusion, 0), yerr=np.std(confusion, 0)*s, color=marker)
ax[1, 1].set_ylabel("OverlapFalseObject")
ax[1, 1].set_ylim([0, 41])
ax[1, 1].set_xlabel(xlabel)
ax[1, 1].set_xlim([0, 820])
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
def runCapacityTest(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType = "MultipleL4L2Columns",
repeat=0):
"""
Generate [numObjects] objects with [numPointsPerObject] points per object
Train L4-l2 network all the objects with single pass learning
Test on (feature, location) pairs and compute
:param numObjects:
:param numPointsPerObject:
:param sampleSize:
:param activationThreshold:
:param numCorticalColumns:
:return:
"""
l4ColumnCount = l4Params["columnCount"]
numInputBits = objectParams['numInputBits']
externalInputSize = objectParams['externalInputSize']
if numInputBits is None:
numInputBits = int(l4ColumnCount * 0.02)
numLocations = objectParams["numLocations"]
numFeatures = objectParams["numFeatures"]
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=l4ColumnCount,
externalInputSize=externalInputSize,
numCorticalColumns=numCorticalColumns,
numLocations=numLocations,
numFeatures=numFeatures
)
exp = L4L2Experiment("capacity_two_objects",
numInputBits=numInputBits,
L2Overrides=l2Params,
L4Overrides=l4Params,
inputSize=l4ColumnCount,
networkType = networkType,
externalInputSize=externalInputSize,
numLearningPoints=3,
numCorticalColumns=numCorticalColumns,
objectNamesAreIndices=True)
if objectParams["uniquePairs"]:
pairs = createRandomObjects(
numObjects,
numPointsPerObject,
numLocations,
numFeatures
)
else:
pairs = createRandomObjectsSharedPairs(
numObjects,
numPointsPerObject,
numLocations,
numFeatures
)
for object in pairs:
objects.addObject(object)
exp.learnObjects(objects.provideObjectsToLearn())
testResult = testOnSingleRandomSDR(objects, exp, 100, repeat)
return testResult
def runCapacityTestVaryingObjectSize(
numObjects=2,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
expName=None,
cpuCount=None,
l2Params=None,
l4Params=None,
objectParams=None):
"""
Runs experiment with two objects, varying number of points per object
"""
result = None
cpuCount = cpuCount or multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpuCount, maxtasksperchild=1)
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
params = [(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
0)
for numPointsPerObject in np.arange(10, 160, 20)]
for testResult in pool.map(invokeRunCapacityTest, params):
result = (
pd.concat([result, testResult])
if result is not None else testResult
)
resultFileName = _prepareResultsDir(
"{}.csv".format(expName),
resultDirName=resultDirName
)
pd.DataFrame.to_csv(result, resultFileName)
def invokeRunCapacityTest(params):
""" Splits out params so that runCapacityTest may be invoked with
multiprocessing.Pool.map() to support parallelism
"""
return runCapacityTest(*params)
def runCapacityTestVaryingObjectNum(numPointsPerObject=10,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
expName=None,
cpuCount=None,
l2Params=None,
l4Params=None,
objectParams=None,
networkType="MultipleL4L2Columns",
numRpts=1):
"""
Run experiment with fixed number of pts per object, varying number of objects
"""
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
cpuCount = cpuCount or multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpuCount, maxtasksperchild=1)
numObjectsList = np.arange(50, 1300, 100)
params = []
for rpt in range(numRpts):
for numObjects in numObjectsList:
params.append((numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType,
rpt))
result = None
for testResult in pool.map(invokeRunCapacityTest, params):
result = (
pd.concat([result, testResult])
if result is not None else testResult
)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName)
pd.DataFrame.to_csv(result, resultFileName)
def invokeRunCapacityTest(params):
""" Splits out params so that runCapacityTest may be invoked with
multiprocessing.Pool.map() to support parallelism
"""
return runCapacityTest(*params)
def runCapacityTestWrapperNonParallel(numPointsPerObject=10,
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
numObjects = 100,
expName=None,
l2Params=None,
l4Params=None,
objectParams=None,
networkType="MultipleL4L2Columns",
rpt=0):
"""
Run experiment with fixed number of pts per object, varying number of objects
"""
l4Params = l4Params or getL4Params()
l2Params = l2Params or getL2Params()
testResult = runCapacityTest(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType,
rpt)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName)
if os.path.isfile(resultFileName):
pd.DataFrame.to_csv(testResult, resultFileName, mode = "a", header = False)
else:
pd.DataFrame.to_csv(testResult, resultFileName, mode = "a", header = True)
def invokeRunCapacityTestWrapper(params):
""" Splits out params so that runCapacityTest may be invoked with
multiprocessing.Pool.map() to support parallelism
"""
return runCapacityTestWrapperNonParallel(*params)
def runExperiment1(numObjects=2,
sampleSizeRange=(10,),
numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
Varying number of pts per object, two objects
Try different sample sizes
"""
objectParams = {'numInputBits': 20,
'externalInputSize': 2400,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
l4Params = getL4Params()
l2Params = getL2Params()
numInputBits = objectParams['numInputBits']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_size_synapses_{}".format(sampleSize)
runCapacityTestVaryingObjectSize(numObjects,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams=objectParams)
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying points per object x 2 objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large")
legendEntries = []
for sampleSize in sampleSizeRange:
expName = "capacity_varying_object_size_synapses_{}".format(sampleSize)
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numPointsPerObject", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("# sample size {}".format(sampleSize))
plt.legend(legendEntries, loc=2)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_size_summary.pdf"
)
)
def runExperiment2(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different sample sizes
"""
sampleSizeRange = (10,)
numPointsPerObject = 10
l4Params = getL4Params()
l2Params = getL2Params()
objectParams = {'numInputBits': 20,
'externalInputSize': 2400,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_num_synapses_{}".format(sampleSize)
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(50))
legendEntries = []
for sampleSize in sampleSizeRange:
print "sampleSize: {}".format(sampleSize)
l2Params['sampleSizeProximal'] = sampleSize
expName = "capacity_varying_object_num_synapses_{}".format(sampleSize)
resultFileName = _prepareResultsDir(
"{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("# sample size {}".format(sampleSize))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_summary.pdf"
)
)
def runExperiment3(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different L4 network size
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5})
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4size_summary.pdf"
)
)
def runExperiment4(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
varying number of cortical columns
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5, 'l2Column': 1})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5, 'l2Column': 2})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'thresh': 5, 'l2Column': 3})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
numCorticalColumns = expParam['l2Column']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} #cc {} ".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"multiple_column_capacity_varying_object_num_and_column_num_summary.pdf"
)
)
def runExperiment5(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
Vary the size of L2 (cell count).
Calculate capacity by varying the number of objects, each of fixed size.
"""
numPointsPerObject = 10
numRpts = 1
numInputBits = 20
externalInputSize = 2400
numL4MiniColumns = 150
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'L2cellCount': 2048, 'L2activeBits': 40, 'w': 20, 'sample': 10, 'thresh': 5,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 20, 'sample': 10, 'thresh': 5,
'l2Column': 1})
expParams.append(
{'L2cellCount': 6144, 'L2activeBits': 40, 'w': 20, 'sample': 10, 'thresh': 5,
'l2Column': 1})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
numCorticalColumns = expParam['l2Column']
l4Params["columnCount"] = numL4MiniColumns
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': externalInputSize,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2size.pdf"
)
)
def runExperiment6(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
Vary the sparsity of L2 (number of active bits, with a fixed cell count).
Calculate capacity by varying the number of objects, each of fixed size.
"""
numPointsPerObject = 10
numRpts = 5
numInputBits = 10
externalInputSize = 2400
numL4MiniColumns = 150
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 10, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 20, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 80, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
for expParam in expParams:
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
l2Params['sampleSizeDistal'] = int(l2Params['sdrSize'] / 2)
l2Params['activationThresholdDistal'] = int(l2Params['sdrSize'] / 2) - 1
numCorticalColumns = expParam['l2Column']
l4Params["columnCount"] = numL4MiniColumns
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': externalInputSize,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expName = "multiple_column_capacity_varying_object_sdrSize_{}_l2Cells_{}_l2column_{}".format(
expParam['L2activeBits'], expParam["L2cellCount"], expParam['l2Column'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expName,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_sdrSize_{}_l2Cells_{}_l2column_{}".format(
expParam['L2activeBits'], expParam["L2cellCount"], expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2_sparsity.pdf"
)
)
def runExperiment7(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different numLocations
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = [
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 16},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 1000},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 5000},
]
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {
'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': expParam['numFeatures'],
'numLocations': expParam['numLocations'],
'uniquePairs': False
}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} locs {} feats {}".format(
expParam["l4Column"], expParam['numLocations'], expParam['numFeatures']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_locations_num_summary.pdf"
)
)
def runExperiment8(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
Try different numFeatures
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = [
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 15, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 150, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 500, 'numLocations': 128},
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'numFeatures': 5000, 'numLocations': 128},
]
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {
'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': expParam['numFeatures'],
'numLocations': expParam['numLocations'],
'uniquePairs': False
}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_locations_{}_num_features_{}_l4column_{}".format(
expParam['numLocations'], expParam['numFeatures'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} locs {} feats {}".format(
expParam["l4Column"], expParam['numLocations'], expParam['numFeatures']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_features_num_summary.pdf"
)
)
def runExperiment9(resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=None):
"""
runCapacityTestVaryingObjectNum()
varying number of cortical columns, 2d topology.
"""
numPointsPerObject = 10
numRpts = 3
objectNumRange = range(10, 1000, 50)
l4Params = getL4Params()
l2Params = getL2Params()
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 4, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 9, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 16, 'networkType': "MultipleL4L2Columns"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 4, 'networkType': "MultipleL4L2ColumnsWithTopology"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 9, 'networkType': "MultipleL4L2ColumnsWithTopology"})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3, 'l2Column': 16, 'networkType': "MultipleL4L2ColumnsWithTopology"})
run_params = []
for object_num in reversed(objectNumRange):
for expParam in expParams:
for rpt in range(numRpts):
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
numCorticalColumns = expParam['l2Column']
networkType = expParam['networkType']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "Experiment Params: "
pprint(expParam)
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'], expParam["networkType"])
try:
os.remove(_prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName))
except OSError:
pass
run_params.append((numPointsPerObject,
numCorticalColumns,
resultDirName,
object_num,
expName,
l2Params,
l4Params,
objectParams,
networkType,
rpt))
pool = multiprocessing.Pool(cpuCount or multiprocessing.cpu_count(), maxtasksperchild=1)
pool.map(invokeRunCapacityTestWrapper, run_params, chunksize = 1)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
colormap = plt.get_cmap("jet")
colors = [colormap(x) for x in np.linspace(0., 1., len(expParams))]
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'], expParam["networkType"])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, colors[ploti])
ploti += 1
if "Topology" in expParam["networkType"]:
legendEntries.append("L4 mcs {} #cc {} w/ topology".format(
expParam['l4Column'], expParam['l2Column']))
else:
legendEntries.append("L4 mcs {} #cc {}".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"multiple_column_capacity_varying_object_num_column_num_connection_type_summary.pdf"
)
)
def runExperiment10(numCorticalColumns=DEFAULT_NUM_CORTICAL_COLUMNS,
resultDirName=DEFAULT_RESULT_DIR_NAME,
plotDirName=DEFAULT_PLOT_DIR_NAME,
cpuCount=1):
"""
Try different L4 and L2 network sizes
"""
numPointsPerObject = 10
numRpts = 1
l4Params = getL4Params()
l2Params = getL2Params()
l2Params['cellCount'] = 4096
l2Params['sdrSize'] = 40
expParams = []
expParams.append(
{'l4Column': 100, 'externalInputSize': 2400, 'w': 10, 'sample': 5,
'L2cellCount': 2000, 'L2activeBits': 20, 'thresh': 4})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 15, 'sample': 8,
'L2cellCount': 3000, 'L2activeBits': 30, 'thresh': 6})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 10,
'L2cellCount': 4000, 'L2activeBits': 40, 'thresh': 8})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 25, 'sample': 13,
'L2cellCount': 5000, 'L2activeBits': 50, 'thresh': 10})
for expParam in expParams:
l4Params["columnCount"] = expParam['l4Column']
numInputBits = expParam['w']
l4Params["activationThreshold"] = int(numInputBits * .6)
l4Params["minThreshold"] = int(numInputBits * .6)
l4Params["sampleSize"] = int(2 * l4Params["activationThreshold"])
l2Params['sampleSizeProximal'] = expParam['sample']
l2Params['minThresholdProximal'] = expParam['thresh']
l2Params['cellCount'] = expParam['L2cellCount']
l2Params['sdrSize'] = expParam['L2activeBits']
l2Params['sampleSizeDistal'] = int(expParam['L2cellCount']*.5)
l2Params['activationThresholdDistal'] = int(expParam['L2cellCount'] * .5)-1
objectParams = {'numInputBits': numInputBits,
'externalInputSize': expParam['externalInputSize'],
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True,}
print "l4Params: "
pprint(l4Params)
print "l2Params: "
pprint(l2Params)
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2cell_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"], expParam['L2cellCount'])
runCapacityTestVaryingObjectNum(numPointsPerObject,
numCorticalColumns,
resultDirName,
expname,
cpuCount,
l2Params,
l4Params,
objectParams,
"MultipleL4L2Columns",
numRpts)
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2cell_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"], expParam['L2cellCount'])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti])
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4l2size_summary.pdf"
)
)
def runExperiments(resultDirName, plotDirName, cpuCount):
# # Varying number of pts per objects, two objects
# runExperiment1(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of objects
# runExperiment2(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# 10 pts per object, varying number of objects, varying L4 size
# runExperiment3(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# 10 pts per object, varying number of objects and number of columns
runExperiment4(resultDirName=resultDirName,
plotDirName=plotDirName,
cpuCount=cpuCount)
# # 10 pts per object, varying number of L2 cells
# runExperiment5(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying sparsity of L2
# runExperiment6(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of location SDRs
# runExperiment7(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
#
# # 10 pts per object, varying number of feature SDRs
# runExperiment8(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
# # #10 pts per object, varying number of objects and number of columns
# runExperiment9(resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
# 10 pts per object, varying number of objects, varying L4/L2 size
# runExperiment10(numCorticalColumns=1,
# resultDirName=resultDirName,
# plotDirName=plotDirName,
# cpuCount=cpuCount)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--resultDirName",
default=DEFAULT_RESULT_DIR_NAME,
type=str,
metavar="DIRECTORY"
)
parser.add_argument(
"--plotDirName",
default=DEFAULT_PLOT_DIR_NAME,
type=str,
metavar="DIRECTORY"
)
parser.add_argument(
"--cpuCount",
default=None,
type=int,
metavar="NUM",
help="Limit number of cpu cores. Defaults to `multiprocessing.cpu_count()`"
)
opts = parser.parse_args()
# runExperiments(resultDirName=opts.resultDirName,
# plotDirName=opts.plotDirName,
# cpuCount=opts.cpuCount)
numObjects = 50
numPointsPerObject=10
numCorticalColumns=1
l4Params = getL4Params()
l2Params = getL2Params()
objectParams = {'numInputBits': 20,
'externalInputSize': 2400,
'numFeatures': DEFAULT_NUM_FEATURES,
'numLocations': DEFAULT_NUM_LOCATIONS,
'uniquePairs': True}
runCapacityTest(numObjects,
numPointsPerObject,
numCorticalColumns,
l2Params,
l4Params,
objectParams,
networkType = "MultipleL4L2Columns",
repeat=0) | gpl-3.0 |
mirestrepo/voxels-at-lems | bmvc12/bof/plot_category_accuracy_all_trials.py | 1 | 2424 | #!/usr/bin/env python
# encoding: utf-8
"""
Plot category classification accuracy (recall) vs. number of clusters, over all trials and feature radii.
Author: Isabel Restrepo
"""
import os
import sys
import optparse
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import glob
main_dir="/Users/isa/Experiments/BOF/helicopter_providence";
clf_name = "svm_gamma_10.0_C100.00"
#ft="FPFH"
#ft="SHOT"
ft="SpinImage"
#ft="ShapeContext"
colors = ['magenta','blue','green', 'red', 'black'];
fig=plt.figure()
ax = fig.add_subplot(111);
plt.hold(True);
plt.autoscale(tight=True);
plt.axis(tight=True);
means = (20, 50, 100, 200, 500);
markers = ['--o', '--s', '--v', '--^']
m=0;
for radius in (10, 20, 30, 45):
feature_name = ft +'_'+ str(radius)
acc_curve = [];
yerr_up=[];
yerr_down=[];
for K in means:
avg_classification_dir = "/Users/isa/Experiments/bof_bmvc12/average/" + feature_name + "/percentile_90"
avg_classification_dir = avg_classification_dir + "/classification_" + str(K);
prf1s_file = avg_classification_dir +'/' + clf_name + "_prf1s.txt"
recall_file = avg_classification_dir +'/' + clf_name + "_recall.txt"
support_file = avg_classification_dir +'/' + clf_name + "_support.txt"
fis = open(prf1s_file, 'r');
prfs = np.genfromtxt(fis);
fis.close();
fis = open(recall_file, 'r');
recall = np.genfromtxt(fis);
fis.close();
fis = open(support_file, 'r');
support = np.genfromtxt(fis);
fis.close();
acc = np.average(prfs[:,1], weights=prfs[:,3]);
acc_curve.append(acc)
avg_recall = np.average(recall, axis=1, weights=support);
print recall;
print avg_recall;
yerr_up.append(np.max(avg_recall) - acc);
yerr_down.append(acc - np.min(avg_recall));
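# Worked example with illustrative numbers (not data from this experiment):
# np.average([0.5, 1.0], weights=[1, 3]) = (0.5*1 + 1.0*3)/4 = 0.875, so each class
# contributes in proportion to its support; the error bars then span from
# np.min(avg_recall) up to np.max(avg_recall) around that weighted accuracy value.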
ax.errorbar(means, acc_curve, yerr=[yerr_down, yerr_up], fmt=markers[m], label=feature_name, capsize=12, ms=10, linewidth=2, markeredgewidth=2)
m=m+1;
# ax.errorbar(x, harris_k3_k_means.mean(1), yerr=[yerr_down, yerr_up], fmt='--o', label=labels[3], color=colors[3], capsize=12);
ax.set_xlabel('Number of Clusters',fontsize= 20);
ax.set_ylabel('Recall',fontsize= 20);
#classes= ['Plane', 'House', 'Building', 'Car', 'Parking Lot'];
ax.set_xlim((0,505) );
ax.set_ylim((0.0,1.0));
#ylabels = np.arange(0,1.2,0.2);
plt.setp(ax.get_yticklabels(), fontsize=18)
plt.setp(ax.get_xticklabels(), fontsize=18)
plt.legend(loc='lower center', frameon=False);
plt.show();
| bsd-2-clause |
Mrngilles/matplotlib-styles | examples/test_functions.py | 1 | 1342 | import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spsp
PTS_NUM = 200
def sinplot(num = 5):
fig, ax = plt.subplots()
x = np.linspace(0, 14, PTS_NUM)
for i in xrange(1, num):
ax.plot(x, np.sin(x + 0.5*i)*(num-i), 'o-', label="i = {}".format(i))
ax.set_title("Modified sinus function")
ax.set_xlabel("x label")
ax.set_ylabel("y label")
return fig, ax
def bessel_plot(num=5):
fig, ax = plt.subplots()
x = np.linspace(0, 20, PTS_NUM)
for v in xrange(1, num):
y = spsp.jv(v, x)
plt.plot(x, y, 'o-', label=r"$J_{}$".format(v))
ax.set_title("Bessel functions")
ax.set_xlabel("x label")
ax.set_ylabel("y label")
return fig, ax
def fill_plot():
fig, ax = plt.subplots()
x = np.linspace(0, 20, PTS_NUM)
y1 = np.sin(x)+(x/2.)
y2 = np.sin(x)+x
plt.fill_between(x, y1, y2, label="Filled")
ax.set_title("Filled between")
ax.set_xlabel("x label")
ax.set_ylabel("y label")
return fig, ax
def exp_mx(num=3):
fig, ax = plt.subplots()
x = np.linspace(0, 10, PTS_NUM)
for v in xrange(1, num):
y = np.exp(-x)*(x**v)
plt.plot(x, y, 'o-', label=r"$e^{{-x^{}}}$".format(v))
ax.set_title("Exponential functions")
ax.set_xlabel("x label")
ax.set_ylabel("y label")
return fig, ax
| mit |
fyffyt/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
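# Illustrative note (not part of the original example): each entry above unpacks in
# the fitting loop below as, e.g.,
#   name, estimator, center = ('Eigenfaces - RandomizedPCA',
#                              decomposition.RandomizedPCA(...), True)
# where `center` selects whether the centered faces are used for fitting.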
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
bjanesh/odi-tools | docs/source/conf.py | 1 | 11137 | # -*- coding: utf-8 -*-
#
# odi-tools documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 17 15:20:27 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('example/'))
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ['astropy.photutils','numpy','astropy','pyraf','numpy.ma',
'matplotlib.pyplot','astropy.io','astropy.wcs',
'matplotlib.pylab','astropy.stats','astropy.convolution'
,'astropy.units','pandas','astropy.convolution',
'astropy.table','astropy.coordinates','astropy.extern',
'astropy.utils.misc','astropy.utils.exceptions',
'astropy.wcs.utils','astropy.nddata','numpy.lib.index_tricks',
'photutils.detection','scipy.ndimage','photutils.utils',
'astropy.visualization.mpl_normalize','astropy.modeling',
'tqdm','gaia.tap','astroquery.vizier']
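# Each module listed in autodoc_mock_imports is replaced by a mock object at build
# time, so autodoc can import the odi-tools modules on Read the Docs without
# installing heavy dependencies such as numpy, astropy, or pyraf.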
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc','sphinx.ext.napoleon','sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odi-tools'
copyright = u'2017, Owen Boberg, Bill Janesh'
author = u'Owen Boberg, Bill Janesh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'odi-tools v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'odi-toolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'odi-tools.tex', u'odi-tools Documentation',
u'Owen Boberg, Bill Janesh', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'odi-tools', u'odi-tools Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'odi-tools', u'odi-tools Documentation',
author, 'odi-tools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| bsd-3-clause |
Mazecreator/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 53 | 6786 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def train_and_evaluate_exogenous(csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.contrib.layers.sparse_column_with_keys(
column_name="is_changepoint", keys=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.contrib.layers.one_hot_column(
sparse_id_column=string_feature)
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=[one_hot_feature],
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with
# "leaky" updates which add unnecessary uncertainty to the model even when
# there is no changepoint.
exogenous_update_condition=
lambda times, features: tf.equal(features["is_changepoint"], "yes"))
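# The update-condition callable receives `times` and the `features` dictionary and
# returns a boolean tensor with one entry per timestep; the exogenous update is
# applied only where that tensor is True. An equivalent named form of the lambda
# above (hypothetical name, shown only for clarity) would be:
#   def _only_at_changepoints(times, features):
#     return tf.equal(features["is_changepoint"], "yes")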
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
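# The limits above trace a +/- one standard deviation band around the forecast
# mean; for example, a mean of 2.0 with variance 0.25 gives the band [1.5, 2.5].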
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly", *train_and_evaluate_exogenous())
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
liyu1990/sklearn | sklearn/model_selection/tests/test_validation.py | 20 | 27961 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from test_split import MockClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y)
def test_cross_val_score_predict_labels():
# Check if ValueError (when labels is None) propagates to cross_val_score
# and cross_val_predict
# And also check if labels is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_label, _, pvalue_label = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, labels=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
p = np.arange(100)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
teuben/masc | examples/fitsdiff3.py | 2 | 1131 | #! /usr/bin/env python
#
# This routine can diff images from its neighbors. For a series i=1,N
# this can loop over i=2,N-1 to produce N-2 difference images
#
# B_i = A_i - (A_i-1 + A_i+1) / 2
#
# Optionally some hanning smoothing could be applied to the difference
# image to improve source detection
#
from __future__ import print_function
import glob
import sys
import shutil
import os
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
if False:
# first one
f1 = 'MASN01-2018-03-24T01-30-16-112Z.fits'
f2 = 'MASN01-2018-03-24T01-29-17-685Z.fits' # has object
f3 = 'MASN01-2018-03-24T01-28-19-258Z.fits'
else:
# second one
f1 = 'MASN01-2018-03-24T01-55-35-096Z.fits'
f2 = 'MASN01-2018-03-24T01-56-33-509Z.fits'
f3 = 'MASN01-2018-03-24T01-57-31-931Z.fits'
hdu1 = fits.open(f1)
hdu2 = fits.open(f2)
hdu3 = fits.open(f3)
h2 = hdu2[0].header
d1 = hdu1[0].data
d2 = hdu2[0].data
d3 = hdu3[0].data
print(f1,d1.min(),d1.max())
print(f2,d2.min(),d2.max())
print(f3,d3.min(),d3.max())
diff = d2 - 0.5*(d1+d3)
fits.writeto('diff.fits',diff,h2,overwrite=True)
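# Optional, as suggested in the header comments above (an illustrative sketch,
# not part of the original script): apply a little Hanning smoothing to the
# difference image to help faint-source detection.  The 5x5 window and the use
# of scipy.signal.convolve2d are assumptions.
from scipy.signal import convolve2d
win = np.outer(np.hanning(5), np.hanning(5))
win /= win.sum()
diff_smooth = convolve2d(diff, win, mode='same', boundary='symm')
fits.writeto('diff_smooth.fits', diff_smooth, h2, overwrite=True)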
| mit |
nipy/brainx | brainx/recarrutil.py | 2 | 6756 | """Some utilities for manipulating recarrays.
Warning
-------
This module should *never* be imported as 'import *'
"""
import numpy as np
import numpy.testing as nt
import sys
# The functionality in this module is now better provided by
# Pandas' DataFrame -- http://pandas.pydata.org/
sys.stderr.write('brainx.recarrutil will be removed,'
' install pandas instead\n')
# XXX - It's probably OK to import something, but for now let's ban * imports
# altogether .
__all__ = []
#-----------------------------------------------------------------------------
# Functions and public utilities
#-----------------------------------------------------------------------------
def extrude(arr,flatten=False):
"""Create a view of a recarray with one extra 'extruded' dimension.
    The fields of `arr` must all share a single scalar dtype; the result is a
    plain ndarray view whose last axis runs over the fields.  If `flatten` is
    True the leading dimensions are collapsed, giving shape (arr.size, nfields);
    otherwise the result has shape arr.shape + (nfields,).
"""
dt = arr.dtype
fieldtypes = [ v[0] for v in dt.fields.values() ]
if len(set(fieldtypes)) > 1:
raise ValueError("dtype of recarray must be uniform")
newdtype = fieldtypes[0]
nfields = len(dt.fields)
# If axis is None, for a normal array this means flatten everything and
# return a single number. In our case, we actually want to keep the last
# dimension (the "extruded" one) alive so that we can reconstruct the
# recarray in the end.
if flatten:
newshape = (arr.size,nfields)
else:
newshape = arr.shape + (nfields,)
# Make the new temp array we'll work with
return np.reshape(arr.view(newdtype),newshape)
def intrude(arr,dtype):
"""Intrude a recarray by 'flattening' its last dimension into a composite
dtype.
    This is the inverse of `extrude`: the last axis of `arr` is folded back
    into the fields described by `dtype`, and the result is returned as a
    recarray of shape arr.shape[:-1].
"""
outshape = arr.shape[:-1]
return (np.reshape(arr.view(dtype),outshape)).view(np.recarray)
def offset_axis(axis):
"""Axis handling logic that is generic to all reductions."""
flatten = axis is None
if flatten:
axis = 0
else:
if axis < 0:
# The case of a negative input axis needs compensation, because we
# are adding a dimension by ourselves
axis -= 1
return flatten, axis
def reduction_factory(name):
"""Create a reduction operation for a given method name.
"""
def op(arr, axis=None):
        # Normalize the axis argument: axis=None means flatten; negative axes
        # are shifted by one to account for the extra extruded dimension.
flatten, axis = offset_axis(axis)
newarr = extrude(arr,flatten)
# Do the operation on the new array
method = getattr(newarr,name)
result = method(axis)
# Make the output back into a recarray of the original dtype
return intrude(result, arr.dtype)
doc = "%s of a recarray, preserving its structure." % name
op.__doc__ = doc
op.func_name = name
return op
# For methods in the array interface that take an axis argument, the pattern is
# always the same: extrude, operate, intrude. So we just auto-generate these
# functions here.
reduction_names = ['mean', 'std', 'var', 'min', 'max',
'sum', 'cumsum', 'prod', 'cumprod' ]
for fname in reduction_names:
exec("%s = reduction_factory('%s')" % (fname, fname))
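# Illustrative usage (a sketch, not part of the original module): with ``z`` a
# uniform two-field float recarray of shape (2, 3), ``mean(z, axis=0)`` returns
# a shape-(3,) recarray whose 'x' and 'y' fields hold the column means of
# ``z.x`` and ``z.y``, while ``mean(z)`` reduces over all elements but keeps
# both fields.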
def binop_factory(func):
"""Create a binary operation for a given name.
"""
def op(a1, a2, out=None):
new_a1 = extrude(a1)
new_a2 = extrude(a2)
if out is not None:
out = extrude(out)
# Do the operation on the new array
if out is None:
result = func(new_a1, new_a2)
else:
result = func(new_a1, new_a2, out)
# Make the output back into a recarray of the original dtype
return intrude(result, a1.dtype)
    doc = "Binary %s of two recarrays, preserving their structure." % func.__name__
    op.__doc__ = doc
    op.func_name = func.__name__
return op
# For methods in the array interface that take an axis argument, the pattern is
# always the same: extrude, operate, intrude. So we just auto-generate these
# functions here.
binops = [('add', np.add), ('subtract', np.subtract),
('multiply', np.multiply), ('divide', np.divide) ]
#binops = [('add',np.add), np.subtract, np.multiply, np.divide ]
for name, func in binops:
exec("%s = binop_factory(func)" % name)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_mean_zero():
dt = np.dtype(dict(names=['x','y'], formats=[float,float]))
z = np.zeros((2,3), dt)
nt.assert_equal(mean(z),z)
return 1
def mk_xyz():
"""Test utility, make x, y, z arrays."""
dt = np.dtype(dict(names=['x','y'],formats=[float,float]))
x = np.arange(6,dtype=float).reshape(2,3)
y = np.arange(10,16,dtype=float).reshape(2,3)
z = np.empty( (2,3), dt).view(np.recarray)
z.x = x
z.y = y
return x, y, z
def mk_xyzw():
"""Test utility, make x, y, z, w arrays."""
x, y, z = mk_xyz()
w = z.copy()
w.x *= 2
w.y *= 2
return x, y, z, w
def test_reductions():
x, y, z = mk_xyz()
for fname in reduction_names:
reduction = eval(fname)
xmeth = getattr(x, fname)
ymeth = getattr(y, fname)
for axis in [None,0,1,-1,-2]:
zred = reduction(z,axis)
nt.assert_equal(zred.x, xmeth(axis))
nt.assert_equal(zred.y, ymeth(axis))
def test_binops():
x, y, z, w = mk_xyzw()
binop_names = [n for (n, op) in binops]
for fname in binop_names:
op = eval(fname)
npop = getattr(np, fname)
opres = op(z,w)
nt.assert_equal(opres.x, npop(z.x, w.x))
nt.assert_equal(opres.y, npop(z.y, w.y))
# Test support utilities
def eval_tests(testgen):
"""Little utility to consume a nose-compliant test generator.
Returns
-------
The number of executed tests. An exception is raised if any fails."""
return len([ t[0](*t[1:]) for t in testgen() ])
# Mark it as not being a test itself, so nose doesn't try to run it
eval_tests.__test__ = False
def run_test_suite():
"""Call all our tests in sequence.
This lets us run the script as a test suite without needing nose or any
other test runner for simple cases"""
from time import clock
# Initialize counters
ntests = 0
start = clock()
# Call the tests and count them
ntests += test_mean_zero()
ntests += eval_tests(test_reductions)
ntests += eval_tests(test_binops)
# Stop clock and summarize
end = clock()
print('-'*70)
print("Ran %s tests in %.3f" % (ntests, end-start))
print('\nOK')
run_test_suite.__test__ = False
# If run as a script, just run all the tests and print summary if successful
if __name__ == '__main__':
run_test_suite()
| bsd-3-clause |
wesm/statsmodels | scikits/statsmodels/graphics/tsaplots.py | 2 | 2369 |
import numpy as np
#copied/moved from sandbox/tsa/example_arma.py
def plotacf(ax, corr, lags=None, usevlines=True, **kwargs):
"""
Plot the auto or cross correlation.
lags on horizontal and correlations on vertical axis
Note: adjusted from matplotlib's pltxcorr
Parameters
----------
ax : matplotlib axis or plt
ax can be matplotlib.pyplot or an axis of a figure
    corr : array
        array of values used on vertical axis
    lags : array or None
        array of lags used on horizontal axis,
        if None, then np.arange(len(corr)) is used
usevlines : boolean
If true, then vertical lines and markers are plotted. If false,
only 'o' markers are plotted
**kwargs : optional parameters for plot and axhline
these are directly passed on to the matplotlib functions
Returns
-------
a : matplotlib.pyplot.plot
contains markers
    b : matplotlib.collections.LineCollection
        returned only if usevlines is true, contains the vertical lines
    c : instance of matplotlib.lines.Line2D
        returned only if usevlines is true, contains the horizontal reference
        line drawn by axhline
Data are plotted as ``plot(lags, c, **kwargs)``
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
See Also
--------
:func:`~matplotlib.pyplot.xcorr`
:func:`~matplotlib.pyplot.acorr`
mpl_examples/pylab_examples/xcorr_demo.py
"""
if lags is None:
lags = np.arange(len(corr))
else:
if len(lags) != len(corr):
raise ValueError('lags and corr must be equal length')
if usevlines:
b = ax.vlines(lags, [0], corr, **kwargs)
c = ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a = ax.plot(lags, corr, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = ax.plot(lags, corr, **kwargs)
b = c = None
return a, b, c
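if __name__ == '__main__':
    # Small usage sketch (not part of the original module): plot a toy,
    # geometrically decaying autocorrelation sequence with vertical lines.
    import matplotlib.pyplot as plt
    corr = 0.8 ** np.arange(20)
    fig, ax = plt.subplots()
    plotacf(ax, corr)
    ax.set_xlabel('lag')
    ax.set_ylabel('autocorrelation')
    plt.show()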
| bsd-3-clause |
ravindrapanda/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 59 | 13552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testFillArraySmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArraySmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[32, 32], dtype=np.bool).tolist() +
np.ones(shape=[32, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.bool)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.bool).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.bool)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
if __name__ == "__main__":
test.main()
| apache-2.0 |
kHarshit/DAT210x_Microsoft | Module4/assignment1.py | 1 | 3417 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime
from mpl_toolkits.mplot3d import Axes3D
from plyfile import PlyData, PlyElement
# Every 100 data samples, we save 1. If things run too
# slow, try increasing this number. If things run too fast,
# try decreasing it... =)
reduce_factor = 100
# Look pretty...
matplotlib.style.use('ggplot')
# Load up the scanned armadillo
plyfile = PlyData.read('Datasets/stanford_armadillo.ply')
armadillo = pd.DataFrame({
'x':plyfile['vertex']['z'][::reduce_factor],
'y':plyfile['vertex']['x'][::reduce_factor],
'z':plyfile['vertex']['y'][::reduce_factor]
})
def do_PCA(armadillo):
#
# TODO: Write code to import the libraries required for PCA.
# Then, train your PCA on the armadillo dataframe. Finally,
# drop one dimension (reduce it down to 2D) and project the
# armadillo down to the 2D principal component feature space.
#
# NOTE: Be sure to RETURN your projected armadillo!
# (This projection is actually stored in a NumPy NDArray and
# not a Pandas dataframe, which is something Pandas does for
# you automatically. =)
#
    # A minimal implementation following the TODO above (a sketch): train a
    # 2-component PCA on the armadillo dataframe and return the projection,
    # which sklearn returns as a NumPy NDArray.
    from sklearn.decomposition import PCA
    pca = PCA(n_components=2)
    pca.fit(armadillo)
    return pca.transform(armadillo)
def do_RandomizedPCA(armadillo):
#
# TODO: Write code to import the libraries required for
# RandomizedPCA. Then, train your RandomizedPCA on the armadillo
# dataframe. Finally, drop one dimension (reduce it down to 2D)
# and project the armadillo down to the 2D principal component
# feature space.
#
# NOTE: Be sure to RETURN your projected armadillo!
# (This projection is actually stored in a NumPy NDArray and
# not a Pandas dataframe, which is something Pandas does for
# you automatically. =)
#
# NOTE: SKLearn deprecated the RandomizedPCA method, but still
# has instructions on how to use randomized (truncated) method
# for the SVD solver. To find out how to use it, set `svd_solver`
# to 'randomized' and check out the full docs here
#
# http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
#
# Deprecated Method: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.RandomizedPCA.html
#
    # A minimal implementation following the TODO above (a sketch): PCA with
    # the randomized (truncated) SVD solver, reduced to 2 components, since
    # RandomizedPCA itself is deprecated.
    from sklearn.decomposition import PCA
    rpca = PCA(n_components=2, svd_solver='randomized')
    rpca.fit(armadillo)
    return rpca.transform(armadillo)
# Render the Original Armadillo
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Armadillo 3D')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(armadillo.x, armadillo.y, armadillo.z, c='green', marker='.', alpha=0.75)
# Time the execution of PCA 5000x
# PCA is run 5000x in order to help decrease the potential of rogue
# processes altering the speed of execution.
t1 = datetime.datetime.now()
for i in range(5000): pca = do_PCA(armadillo)
time_delta = datetime.datetime.now() - t1
# Render the newly transformed PCA armadillo!
if not pca is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('PCA, build time: ' + str(time_delta))
ax.scatter(pca[:,0], pca[:,1], c='blue', marker='.', alpha=0.75)
# Time the execution of rPCA 5000x
t1 = datetime.datetime.now()
for i in range(5000): rpca = do_RandomizedPCA(armadillo)
time_delta = datetime.datetime.now() - t1
# Render the newly transformed RandomizedPCA armadillo!
if not rpca is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('RandomizedPCA, build time: ' + str(time_delta))
ax.scatter(rpca[:,0], rpca[:,1], c='red', marker='.', alpha=0.75)
plt.show()
| mit |
phdowling/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlations from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
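# Example usage of the loader above (an illustrative sketch, not part of this
# module; the first call downloads the data):
#
#     from sklearn.datasets import fetch_lfw_people
#     lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#     X, y = lfw_people.data, lfw_people.target
#     n_samples, h, w = lfw_people.images.shape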
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
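# Each metadata line is tab-separated. A 3-field line ("name idx1 idx2") describes
# two pictures of the same person (target=1); a 4-field line
# ("name1 idx1 name2 idx2") describes pictures of two different people (target=0).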
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid statistical
correlation from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
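# (pool-adjacent-violators averages the decreasing run (7, 5) to 6 and the
# run (9, 8, 7) to 8, which yields the non-decreasing sequence above)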
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
wiheto/teneto | teneto/classes/bids.py | 1 | 22842 | """TenetoBIDS is a class to use Teneto functions with data organized with BIDS (neuroimaging data)."""
import os
import inspect
import json
# import bids
import importlib
import numpy as np
import pandas as pd
import bids
from .. import __path__ as tenetopath
from .. import __version__ as tenetoversion
from ..neuroimagingtools import load_tabular_file, get_sidecar
#from .network import TemporalNetwork
class TenetoBIDS:
"""
Class for analysing data in BIDS.
TenetoBIDS allows for an analysis to be performed across a dataset.
All different functions from Teneto can be applied to all files in a dataset organized in BIDS.
Data should be first preprocessed (e.g. fMRIPrep).
Parameters
----------
bids_dir : str
string to BIDS directory
selected_pipeline : str or dict
the directory that is in the bids_dir/derivatives/<selected_pipeline>/.
This pipeline will be used as the input to any teneto function (first argument).
If multiple inputs are required for a function, then you can specify:
{'netin': 'tvc',
'communities': 'coms'}
With this, the input for netin with be from bids_dir/derivatives/[teneto-]tvc/,
and the input for communities will be from bids_dir/derivatives/[teneto-]coms/.
The keys in this dictionary must match the names of the teneto function inputs.
bids_filter : dict
history : bool
update_pipeline : bool
If true, the output_pipeline becomes the new selected_pipeline
exist_ok : bool
If False, will raise an error if the output directory already exists.
If True, will not raise an error.
This can lead to files being overwritten, if desc is not set.
nettsv : str can be nn-t or ijt.
nn-t means networks are node-node x time.
ijt means dataframes have i, j, t columns.
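Examples
--------
A minimal sketch (the dataset path and pipeline name below are hypothetical,
not shipped with teneto):
>>> tnet = TenetoBIDS('/data/mybids', selected_pipeline='fMRIPrep',
...                   bids_filter={'subject': '01', 'task': 'rest'},
...                   exist_ok=True)  # doctest: +SKIP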
"""
def __init__(self, bids_dir, selected_pipeline, bids_filter=None, bidsvalidator=False,
update_pipeline=True, history=None, exist_ok=False, layout=None, nettsv='nn-t'):
if layout is None:
self.BIDSLayout = bids.BIDSLayout(bids_dir, derivatives=True, validate=bidsvalidator)
else:
self.BIDSLayout = layout
self.bids_dir = bids_dir
self.selected_pipeline = selected_pipeline
self.nettsv = nettsv
self.bids_filter = {} if bids_filter is None else bids_filter
if history is not None:
self.history = {}
self.exist_ok = exist_ok
self.update_pipeline = update_pipeline
with open(tenetopath[0] + '/config/tenetobids/tenetobids_description.json') as f:
self.tenetobids_description = json.load(f)
self.tenetobids_description['PipelineDescription']['Version'] = tenetoversion
with open(tenetopath[0] + '/config/tenetobids/tenetobids_structure.json') as f:
self.tenetobids_structure = json.load(f)
# def set_selected_pipeline(self, selected_pipeline):
# bids.
def update_bids_layout(self):
"""
Function that updates the BIDS layout.
"""
self.BIDSLayout = bids.BIDSLayout(self.bids_dir, derivatives=True)
def create_output_pipeline(self, runc_func, output_pipeline_name, exist_ok=None):
"""Creates the directories of the saved file.
Parameters
----------
output_pipeline : str
name of output pipeline
exist_ok : bool
If False, will raise an error if the pipeline already exists.
If True, will not raise an error.
This can lead to files being overwritten, if desc is not set.
If None, will use the exist_ok set during init.
Returns
-------
Creates the output pipeline directory in:
bids_dir/teneto-[output_pipeline]/
"""
if exist_ok is not None:
self.exist_ok = exist_ok
output_pipeline = 'teneto-'
output_pipeline += runc_func.split('.')[-1]
output_pipeline = output_pipeline.replace('_', '-')
if output_pipeline_name is not None:
output_pipeline += '_' + output_pipeline_name
output_pipeline_path = self.bids_dir + '/derivatives/' + output_pipeline
if os.path.exists(output_pipeline_path) and not self.exist_ok:
raise ValueError(
'Output_pipeline already exists. Set exist_ok to True if this is desired behaviour.')
os.makedirs(output_pipeline_path, exist_ok=self.exist_ok)
# Initiate with dataset_description
datainfo = self.tenetobids_description.copy()
datainfo['PipelineDescription']['Name'] = output_pipeline
with open(output_pipeline_path + '/dataset_description.json', 'w') as fs:
json.dump(datainfo, fs)
self.update_bids_layout()
return output_pipeline
def run(self, run_func, input_params, output_desc=None, output_pipeline_name=None, bids_filter=None, update_pipeline=True, exist_ok=None, troubleshoot=False):
"""Runs a runction on the selected files.
Parameters
---------------
run_func : str
str should correspond to a teneto function.
So to run the function teneto.timeseries.derive_temporalnetwork
the input should be: 'timeseries.derive_temporalnetwork'
input_params : dict
keyword and value pairing of arguments for the function being run.
The input data to each function will be located automatically.
This input_params does not need to include the input network.
For any other input that needs to be loaded within the teneto_bidsstructure
(communities, events, confounds),
you can pass the value "bids" if they can be found within the current selected_pipeline.
If they are found within a different selected_pipeline, type "bids_[selected_pipeline]".
output_desc : str
If none, no desc is used (removed any previous file)
If 'keep', then desc is preserved.
If any other str, desc is set to that string
output_pipeline_name : str
If set, then the data is saved in teneto_[functionname]_[output_pipeline_name].
If run_func is teneto.timeseries.derive_temporalnetwork and output_pipeline_name
is jackknife, then the pipeline the data is saved in is
teneto-generatetemporalnetwork_jackknife
update_pipeline : bool
If set to True (default), then the selected_pipeline updates to output of function
exist_ok : bool
If set to True, then overwriting the output directory is possible.
troubleshoot : bool
If True, prints out certain information during running.
Useful to run if reporting a bug.
"""
if exist_ok is not None:
self.exist_ok = exist_ok
# Import teneto if it has not been already
if 'teneto' not in globals():
teneto = importlib.import_module('teneto')
func = teneto
for f in self.tenetobids_structure[run_func]['module'].split('.'):
func = getattr(func, f)
functype = self.tenetobids_structure[run_func]['functype']
func = getattr(func, run_func)
# Only set up an output pipeline if the functype is ondata
if functype == 'on_data':
output_pipeline = self.create_output_pipeline(
run_func, output_pipeline_name, self.exist_ok)
input_files = self.get_selected_files(run_func.split('.')[-1])
if not input_files:
raise ValueError('No input files')
if troubleshoot:
self.troubleshoot('Initial input files', {'input_files': input_files})
# Check number of required arguments for the function
funcparams, get_confounds = self._check_run_function_args(func, input_params, functype)
good_files = bad_files = 0
for f in input_files:
f_entities = f.get_entities()
if get_confounds == 1:
input_params['confounds'] = self.get_aux_file(f, filetype='confounds')
data, sidecar = self.load_file(f)
if troubleshoot:
self.troubleshoot('Input file name', {'f': f,
'f_entities': f_entities,
'sidecar': sidecar})
if 'sidecar' in dict(funcparams):
input_params['sidecar'] = sidecar
if data is None:
# Skip if data not found
bad_files += 1
else:
if functype == 'on_data':
result = func(data, **input_params)
# if sidecar is in input_params, then sidecar is also returned
if 'sidecar' in dict(funcparams):
result, sidecar = result
# if output_desc is None, then keep desc
if output_desc is None and 'desc' in f_entities:
f_entities.pop('desc')
elif output_desc == 'keep':
pass
elif output_desc is not None:
f_entities['desc'] = output_desc
f_entities.update(
self.tenetobids_structure[run_func.split('.')[-1]]['output'])
output_pattern = '/sub-{subject}/[ses-{session}/]func/sub-{subject}[_ses-{ses}][_run-{run}]_task-{task}[_desc-{desc}]_{suffix}.{extension}'
save_name = self.BIDSLayout.build_path(
f_entities, path_patterns=output_pattern, validate=False)
save_path = self.bids_dir + '/derivatives/' + output_pipeline
if troubleshoot:
self.troubleshoot('File name construction', {'f_entities': f_entities,
'save_name': save_name,
'save_path': save_path})
# Exist ok here has to be true, otherwise multiple runs causes an error
# Any exist_ok is caught in create pipeline.
os.makedirs(
'/'.join((save_path + save_name).split('/')[:-1]), exist_ok=True)
# Save file
# Probably should check the output type in tenetobidsstructure
# Table needs column header
if isinstance(result, np.ndarray):
if len(result.shape) == 3:
# Should be made hdf5 at sometime
# Idea here is to make 3D array to 2D by concatenating node dimensions.
# At reload: to ([np.sqrt(shape[0]), np.sqrt(shape[0]), np.sqrt(shape[1])])
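# e.g. a (n_nodes, n_nodes, n_time) array of shape (5, 5, 100) is flattened to
# a (n_nodes * n_nodes, n_time) table of shape (25, 100) before saving.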
shape = result.shape
result = result.reshape([shape[0] * shape[1], shape[2]])
result = pd.DataFrame(result)
elif len(result.shape) == 2:
result = pd.DataFrame(result)
elif len(result.shape) == 1:
result = pd.Series(result)
else:
raise ValueError(
'Output was array with more than 3 dimensions (unexpected)')
elif isinstance(result, list):
result = pd.DataFrame(result)
elif isinstance(result, (int, float)):
result = pd.Series(result)
if isinstance(result, (pd.DataFrame, pd.Series)):
result.to_csv(save_path + save_name, sep='\t', header=True)
else:
raise ValueError('Unexpected output type')
# add information to sidecar
sidecar['DerivativeSource'] = f.path
sidecar['TenetoFunction'] = {}
sidecar['TenetoFunction']['Name'] = run_func
# For aux_input more is needed here too.
if get_confounds == 1:
input_params['confounds'] = 'Loaded automatically via TenetoBIDS'
elif 'confounds' in input_params:
input_params['confounds'] = 'Passed as argument'
if 'sidecar' in input_params:
input_params['sidecar'] = 'Loaded automatically via TenetoBIDS'
# Loop through input params content and make any nparray input to list for sidecar
sidecar['TenetoFunction']['Parameters'] = {}
for key, value in input_params.items():
if teneto.utils.is_jsonable(value):
sidecar['TenetoFunction']['Parameters'][key] = input_params[key]
else:
if isinstance(input_params[key], np.ndarray):
sidecar['TenetoFunction']['Parameters'][key] = input_params[key].tolist()
else:
print('Warning: Dropping input (' + key + ') from sidecar (not JSONable).')
elif functype == 'on_sidecar':
sidecar = func(**input_params)
update_pipeline = False
save_path = f.dirname + '/'
save_name = f.filename
# Save sidecar
with open(save_path + save_name.replace('.tsv', '.json'), 'w') as f:
json.dump(sidecar, f)
good_files += 1
report = '## ' + run_func + '\n'
report += str(good_files) + ' files were included (' + \
str(bad_files) + ' excluded from run)'
self.report = report
if update_pipeline:
if functype == 'on_data':
self.selected_pipeline = output_pipeline
# Create new bids_filter dictionary that only contains
# sub/ses/run/task as other tags are dropped.
bids_filter = dict(self.bids_filter)
self.bids_filter = {}
bids_filters_allowed = ['subject', 'ses', 'run', 'task']
[self.update_bids_filter({f: bids_filter[f]})
for f in bids_filters_allowed
if f in bids_filter]
self.update_bids_layout()
def _check_run_function_args(self, func, input_params, functype):
"""
Helper function for TenetoBIDS.run.
Function checks that the input parametes match the function.
Returns
========
funcparams : dict
parameters of the input function
get_confounds : bool
1 if confound files need to be loaded.
"""
sig = inspect.signature(func)
funcparams = sig.parameters.items()
required_args = 0
input_args = 0
for p_name, p in funcparams:
if p.default == inspect._empty:
required_args += 1
if p_name in input_params:
input_args += 1
get_confounds = 0
expected_arg_defecit = 1
if 'sidecar' in dict(funcparams) and functype == 'on_data':
expected_arg_defecit += 1
# Calculate the difference between required and input arguments
arg_diff = required_args - input_args
if arg_diff != expected_arg_defecit:
# Three conditionals must be met in order to get confounds
confounds_not_input = 'confounds' not in input_params
confounds_in_func = 'confounds' in dict(funcparams)
arg_needed = arg_diff == expected_arg_defecit + 1
if confounds_not_input and confounds_in_func and arg_needed:
# Get confounds automatically
get_confounds = 1
else:
raise ValueError(
'Expecting one unspecified input argument.\
Enter all required input arguments in input_params except for the data files.')
return funcparams, get_confounds
def get_selected_files(self, output=None):
"""
Uses information in selected_pipeline and the bids layout and shows the files that will be processed when calling TenetoBIDS.run().
If you specify a particular output, it will tell you which files will get selected for that output
"""
if output is not None:
filters = self.tenetobids_structure[output]['input']
else:
# input can only be these files
filters = {'extension': ['.tsv', '.nii', '.nii.gz']}
# Add predefined filters to the check
filters.update(self.bids_filter)
return self.BIDSLayout.get(scope=self.selected_pipeline, **filters)
def get_run_options(self, for_selected=True):
"""Returns the different function names that can be called using TenetoBIDS.run()
Parameters
===========
for_selected : bool
If True, only return run options for the selected files.
If False, returns all options.
Returns
========
options : str
a list of options that can be run.
"""
funcs = self.tenetobids_structure.keys()
if for_selected:
funcs_filter = []
files = self.get_selected_files()
suffix = [f.get_entities()['suffix'] for f in files]
suffix = list(np.unique(suffix))
for t in list(funcs):
s = self.tenetobids_structure[t]['input']['suffix']
if isinstance(s, str):
s = [s]
for su in suffix:
if su in s:
funcs_filter.append(t)
funcs = sorted(list(set(funcs_filter)))
return ', '.join(funcs)
def update_bids_filter(self, filter_addons):
"""Updates TenetoBIDS.bids_filter
Parameters
==========
filter_addons : dict
dictionary that updates TenetoBIDS.bids_filter
"""
self.bids_filter.update(filter_addons)
def get_aux_file(self, bidsfile, filetype='confounds'):
"""Tries to automatically get auxiliary data for input files, and loads it
Parameters
==========
bidsfile : BIDSDataFile or BIDSImageFile
The BIDS file that the auxiliary file is going to be matched to.
filetype : string
Can be confounds, events.
Specify whether you want to get the confounds or events data.
"""
if filetype == 'confounds':
suffix = 'regressors'
elif filetype == 'events':
suffix = 'events'
else:
raise ValueError('unknown file type')
# Get the entities of the filename
file_entities = bidsfile.get_entities()
# Ensure that the extension and suffix are correct
file_entities['suffix'] = suffix
file_entities['extension'] = '.tsv'
if 'desc' in file_entities:
file_entities.pop('desc')
auxfile = self.BIDSLayout.get(**file_entities)
if len(auxfile) == 0:
raise ValueError('No auxiliary file (type: ' + filetype + ') found')
elif len(auxfile) > 1:
raise ValueError('More than one auxiliary file (type: ' + filetype + ') found')
# Load the aux file
aux = load_tabular_file(
auxfile[0].dirname + '/' + auxfile[0].filename, index_col=False)
return aux
def load_data(self, bids_filter=None):
"""Returns data, default is the input data.
bids_filter : dict
default is None. If set, load data will load all files found by the bids_filter.
Any preset BIDS filter is used as well, but will get overwritten by this input.
"""
if bids_filter is None:
files = self.get_selected_files()
else:
filters = dict(self.bids_filter)
filters.update(bids_filter)
files = self.BIDSLayout.get(**filters)
data = {}
for f in files:
if f.filename in data:
raise ValueError('Same name appears twice in selected files')
data[f.filename], _ = self.load_file(f)
return data
def load_file(self, bidsfile):
"""Aux function to load the data and sidecar from a BIDSFile
Parameters
==========
bidsfile : BIDSDataFile or BIDSImageFile
The BIDS file to load (data and sidecar).
"""
# Get sidecar and see if file has been rejected at a previous step
# (note sidecar could be called in input_files, but this will require loading sidecar twice)
sidecar = get_sidecar(bidsfile.dirname + '/' + bidsfile.filename)
if not sidecar['BadFile']:
if hasattr(bidsfile, 'get_image'):
data = bidsfile.get_image()
elif hasattr(bidsfile, 'get_df'):
# This can be changed if/when pybids is updated. Assumes index_col=0 in tsv file
data = load_tabular_file(
bidsfile.dirname + '/' + bidsfile.filename)
else:
data = None
# Since temporal networks are currently saved in 2D collapsed arrays
# The following checks if they should be resized, and resizes
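# e.g. a saved (25, 100) table is restored to a (5, 5, 100)
# node x node x time array.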
if '_temporalconnectivity.tsv' in bidsfile.filename:
dimord = sidecar['TenetoFunction']['Parameters']['params']['dimord']
if (self.nettsv == 'nn-t' or dimord == 'node,time'):
n_nodes = int(np.sqrt(data.shape[0]))
n_time = data.shape[1]
data = data.values.reshape([n_nodes, n_nodes, n_time])
print(data.shape)
return data, sidecar
def troubleshoot(self, stepname, status):
"""
Prints ongoing info to assist with troubleshooting
"""
print('******** TROUBLESHOOT STEP: ' + stepname + ', start ********')
for step in status:
print('++++++++')
print(step)
print('------')
print(status[step])
print('++++++++')
print('******** TROUBLESHOOT STEP: ' + stepname + ', end ********')
def load_events(self):
"""
Loads event data for selected files
"""
input_files = self.get_selected_files()
events = {}
for f in input_files:
events[f.filename] = self.get_aux_file(f, filetype='events')
return events
| gpl-3.0 |
0h-n0/forex_py | frxpy/data/csvfile.py | 1 | 1436 | """
csvfile.py
---------------
This module contains some classes which can treat csv files.
.. autoclass:: fileio.csvfile.CSV
"""
import time
import codecs
import pathlib
import calendar
import datetime
import h5py
import pandas
import numpy as np
class CSVFile(object):
def __init__(self, csvfile, header_info='ohlc', bitask_fmt='bitask',
daytime_format='%Y%m%d %H%M%S', delimiter=';', header=None,
unit='minute'):
self.csvfile = pathlib.Path(csvfile).expanduser().resolve()
self.data = None
self.delimiter = delimiter
self.daytime_format = daytime_format
self.header_info = header_info
self.header = header
self.unit = unit
self._read()
def _read(self):
'''
day-time, Open(BID), High(BID), Low(BID), Close(BID),
'''
self.data = pandas.read_csv(str(self.csvfile),
sep=self.delimiter,
header=self.header
)
def convert_daytime_to_seconds(self, daytime) -> int:
s = datetime.datetime.strptime(daytime, self.daytime_format)
return calendar.timegm(s.utctimetuple())
def convert_seconds_to_daytime(self, seconds):
return datetime.datetime.utcfromtimestamp(seconds).strftime(self.daytime_format)
def show(self):
'''
create an image.
'''
pass
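# A hedged usage sketch (the CSV path below is hypothetical and not shipped
# with this module):
if __name__ == '__main__':
    csv = CSVFile('~/data/EURUSD_minute.csv', delimiter=';')
    print(csv.data.head())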
| mit |
kylerbrown/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
rikima/spark | python/pyspark/sql/tests.py | 1 | 272759 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance of this class has a 'spark' attribute, holding a Spark session.
It is usually used with the 'ReusedSQLTestCase' class but can be used if you feel sure
the implementation of this class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
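For example (the configuration key below is just an illustration)::
    with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
        ...  # code under test runs with the temporary setting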
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedSQLTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
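# Note: UDF columns compose with groupBy/agg like built-in expressions -- above, my_copy feeds
# the grouping key, my_strlen feeds sum(), and my_add post-processes the aggregated result.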
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
# This is to check if a 'SQLContext.udf' can call its alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
# This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
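# Note: for the text source, lineSep sets the record delimiter on read and the separator written
# between records on write, as exercised above with "," for reading and "!" for writing.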
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
# This is separate from UDFInitializationTests
# to avoid context initialization
# when the udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
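# Note: the test above shows that the Java-side UDF handle (_judf) is created lazily on the
# first call, so merely constructing a UserDefinedFunction does not touch the JVM.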
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
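# Note: return types may be given as DDL-style strings instead of DataType objects,
# e.g. "integer", "array<double>", or "struct<x:integer,y:integer>" as above.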
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
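# Note: @udf works bare, with parentheses, with a DataType, or with a DDL string such as
# @udf("long"); when no return type is given the result column defaults to string, which is why
# to_upper/to_lower come back as "string" in the dtypes check above.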
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
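# Note: udf() wraps plain functions, callable objects and functools.partial alike, preserving
# __doc__ and exposing the original callable as .func and the declared type as .returnType, as
# asserted above.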
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
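# Note: a UserDefinedType (UDT) maps a Python class such as ExamplePoint onto a Catalyst SQL
# type so instances can be inferred, verified, serialized and parsed back, as the round trips
# above demonstrate; the following tests push UDTs through DataFrames, SQL, UDFs and Parquet.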
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
import pyspark.sql.functions as f
df = self.spark.createDataFrame([(1, 2), (1, 2)])
f_udf = f.udf(lambda: "const_str")
rows = df.distinct().withColumn("a", f_udf()).collect()
self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
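# Note: arithmetic, comparison and bitwise operators on Column objects build new Column
# expressions rather than evaluating anything eagerly; only Python's 'in' operator is
# unsupported and raises ValueError, as checked above.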
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertEqual(row[1], 1)
self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should not mix processing-time and continuous triggers
try:
df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
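# Note: writeStream.trigger() expects exactly one keyword argument -- processingTime, once or
# continuous -- and rejects positional arguments with a TypeError, per the cases above.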
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
class ForeachWriterTester:
def __init__(self, spark):
self.spark = spark
def write_open_event(self, partitionId, epochId):
self._write_event(
self.open_events_dir,
{'partition': partitionId, 'epoch': epochId})
def write_process_event(self, row):
self._write_event(self.process_events_dir, {'value': 'text'})
def write_close_event(self, error):
self._write_event(self.close_events_dir, {'error': str(error)})
def write_input_file(self):
self._write_event(self.input_dir, "text")
def open_events(self):
return self._read_events(self.open_events_dir, 'partition INT, epoch INT')
def process_events(self):
return self._read_events(self.process_events_dir, 'value STRING')
def close_events(self):
return self._read_events(self.close_events_dir, 'error STRING')
def run_streaming_query_on_writer(self, writer, num_files):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
for i in range(num_files):
self.write_input_file()
sq.processAllAvailable()
finally:
self.stop_all()
def assert_invalid_writer(self, writer, msg=None):
self._reset()
try:
sdf = self.spark.readStream.format('text').load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
self.write_input_file()
sq.processAllAvailable()
self.fail("invalid writer %s did not fail the query" % str(writer)) # not expected
except Exception as e:
if msg:
assert msg in str(e), "%s not in %s" % (msg, str(e))
finally:
self.stop_all()
def stop_all(self):
for q in self.spark._wrapped.streams.active:
q.stop()
def _reset(self):
self.input_dir = tempfile.mkdtemp()
self.open_events_dir = tempfile.mkdtemp()
self.process_events_dir = tempfile.mkdtemp()
self.close_events_dir = tempfile.mkdtemp()
def _read_events(self, dir, json):
rows = self.spark.read.schema(json).json(dir).collect()
dicts = [row.asDict() for row in rows]
return dicts
def _write_event(self, dir, event):
import uuid
with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
f.write("%s\n" % str(event))
def __getstate__(self):
return (self.open_events_dir, self.process_events_dir, self.close_events_dir)
def __setstate__(self, state):
self.open_events_dir, self.process_events_dir, self.close_events_dir = state
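# Note: ForeachWriterTester above drives writeStream.foreach() with either a plain row-handling
# function or an object exposing process(row) and, optionally, open(partition_id, epoch_id) and
# close(error); the tests below cover each combination and the validation errors for malformed
# writers.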
def test_streaming_foreach_with_simple_function(self):
tester = self.ForeachWriterTester(self.spark)
def foreach_func(row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(foreach_func, 2)
self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partitionId, epochId):
tester.write_open_event(partitionId, epochId)
return True
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
open_events = tester.open_events()
self.assertEqual(len(open_events), 2)
self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})
self.assertEqual(len(tester.process_events()), 2)
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return False
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return True
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2) # open was still called for each input file
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_without_open_and_close_methods(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise Exception("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException as e:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
# TODO: Verify whether original error message is inside the exception
def test_streaming_foreach_with_invalid_writers(self):
tester = self.ForeachWriterTester(self.spark)
def func_with_iterator_input(iter):
for x in iter:
print(x)
tester.assert_invalid_writer(func_with_iterator_input)
class WriterWithoutProcess:
def open(self, partition):
pass
tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")
class WriterWithNonCallableProcess():
process = True
tester.assert_invalid_writer(WriterWithNonCallableProcess(),
"'process' in provided object is not callable")
class WriterWithNoParamProcess():
def process(self):
pass
tester.assert_invalid_writer(WriterWithNoParamProcess())
# Abstract class for tests below
class WithProcess():
def process(self, row):
pass
class WriterWithNonCallableOpen(WithProcess):
open = True
tester.assert_invalid_writer(WriterWithNonCallableOpen(),
"'open' in provided object is not callable")
class WriterWithNoParamOpen(WithProcess):
def open(self):
pass
tester.assert_invalid_writer(WriterWithNoParamOpen())
class WriterWithNonCallableClose(WithProcess):
close = True
tester.assert_invalid_writer(WriterWithNonCallableClose(),
"'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
self.assertTrue(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise Exception("this should fail the query")
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# Saving this DataFrame as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
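# --- Illustrative sketch (not part of the test suite) ------------------------
# _infer_type maps a single Python value to a Catalyst type; plain ints of any
# size are inferred as LongType, which is what the assertions above pin down
# (values that do not fit in 64 bits fail later at runtime rather than changing
# the inferred type).
from pyspark.sql.types import _infer_type, LongType
assert _infer_type(1) == LongType()
assert _infer_type(2 ** 71) == LongType()  # still LongType despite exceeding 64 bits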
def test_merge_type(self):
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
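# --- Illustrative sketch (not part of the test suite) ------------------------
# _merge_type is what schema inference falls back on when different rows suggest
# different types: NullType yields to any concrete type, while two incompatible
# concrete types raise the TypeErrors asserted above. A small end-to-end view of
# the same mechanism, assuming a running SparkSession `spark`:
from pyspark.sql import Row
merged_df = spark.createDataFrame([Row(x=None), Row(x=1)])
# NullType (first row) merged with LongType (second row) gives LongType
assert dict(merged_df.dtypes)["x"] == "bigint"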
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
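# --- Illustrative sketch (not part of the test suite) ------------------------
# The broadcast hint exercised above, in its usual shape: wrap the small side of
# an equijoin so the planner chooses BroadcastHashJoin instead of a shuffle join.
# Assumes a running SparkSession `spark`.
from pyspark.sql.functions import broadcast
small = spark.createDataFrame([(1, "a"), (2, "b")], ("id", "label"))
large = spark.range(1000)
large.join(broadcast(small), "id").explain()  # physical plan should show BroadcastHashJoin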
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808).
# The empty bytearray is a test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as Python's types. This is necessary because Python's array
# types depend on the C implementation on the machine, so there is no
# machine-independent correspondence between Python's array types and
# Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make
# sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# The JVM does not have unsigned types, so we need to be very careful to
# make sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys in _array_type_mappings form a complete list of all supported
# types, and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
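# --- Illustrative sketch (not part of the test suite) ------------------------
# The signed-integer bound used in the loop above, in isolation: an n-byte
# signed C type holds values in [-2**(8n - 1), 2**(8n - 1) - 1], so the test
# probes max_val - 1 and -max_val for each typecode.
import ctypes
n_bytes = ctypes.sizeof(ctypes.c_int)  # platform dependent, typically 4
max_val = 2 ** (n_bytes * 8 - 1)       # 2**31 == 2147483648 for a 4-byte int
print(-max_val, max_val - 1)           # inclusive range of the signed type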
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
def test_checking_csv_header(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.createDataFrame([[1, 1000], [2000, 2]])\
.toDF('f1', 'f2').write.option("header", "true").csv(path)
schema = StructType([
StructField('f2', IntegerType(), nullable=True),
StructField('f1', IntegerType(), nullable=True)])
df = self.spark.read.option('header', 'true').schema(schema)\
.csv(path, enforceSchema=False)
self.assertRaisesRegexp(
Exception,
"CSV header does not conform to the schema",
lambda: df.collect())
finally:
shutil.rmtree(path)
def test_ignore_column_of_all_nulls(self):
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
["""{"a":null, "b":null, "c":"string"}"""],
["""{"a":null, "b":null, "c":null}"""]])
df.write.text(path)
schema = StructType([
StructField('b', LongType(), nullable=True),
StructField('c', StringType(), nullable=True)])
readback = self.spark.read.json(path, dropFieldIfAllNull=True)
self.assertEquals(readback.schema, schema)
finally:
shutil.rmtree(path)
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
class HiveSparkSubmitTests(SparkSubmitTests):
@classmethod
def setUpClass(cls):
# get a SparkContext to check for availability of Hive
sc = SparkContext('local[4]', cls.__name__)
cls.hive_available = True
try:
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
finally:
# we don't need this SparkContext for the test
sc.stop()
def setUp(self):
super(HiveSparkSubmitTests, self).setUp()
if not self.hive_available:
self.skipTest("Hive is not available.")
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
# We can't include this test in SQLTests because it stops the class's SparkContext,
# which would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
# This test is separate because it's closely related to the session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume there is the default session already set in JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to the Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
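# _make_type_verifier builds a callable that checks a Python object against a Spark data
# type; the specs below exercise both accepted values and the TypeError/ValueError failures.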
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
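# Arrow-based DataFrame <-> pandas conversion tests; the whole class is skipped
# when pandas or pyarrow is not installed.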
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
import pyarrow as pa
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default so that failures are easy to detect.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
cls.schema.add(StructField("9_binary_t", BinaryType(), True))
cls.data[0] = cls.data[0] + (bytearray(b"a"),)
cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with warnings.catch_warnings(record=True) as warns:
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
from distutils.version import LooseVersion
import pyarrow as pa
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
schema = StructType([StructField("binary", BinaryType(), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
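# Helper: run toPandas() once with Arrow disabled (inside the sql_conf context) and once
# with it re-enabled, so the two conversion paths can be compared.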
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
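# Helper: build the DataFrame once with Arrow disabled and once with it enabled,
# mirroring _toPandas_arrow_toggle for the createDataFrame path.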
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting for the 3-hour offset between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
# Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
# Integers with nulls will get NaNs filled with 0 and will be cast
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
self.spark.createDataFrame(
pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
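# pandas_udf declaration tests: verify returnType and evalType for SCALAR and GROUPED_MAP
# UDFs and validate that bad arguments are rejected.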
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, DoubleType
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from py4j.protocol import Py4JJavaError
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
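# A pandas UDF returning random doubles, explicitly marked nondeterministic via
# asNondeterministic(); used to check that Spark treats it accordingly.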
@property
def nondeterministic_vectorized_udf(self):
from pyspark.sql.functions import pandas_udf
@pandas_udf('double')
def random_udf(v):
import pandas as pd
import numpy as np
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf
from pyspark.rdd import PythonEvalType
import random
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.functions import pandas_udf, col
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
else:
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
def test_vectorized_udf_dates(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import date
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting for the 3-hour offset between Los Angeles and New York
# (diff is expressed in nanoseconds, the unit of pandas Timestamp.value)
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test syntax that is specific to higher versions
# of Python, which we shouldn't encourage. This was the last resort
# I could come up with at the time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
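# The mixed-UDF tests below interleave plain Python UDFs (row-at-a-time ints) with pandas
# UDFs (pd.Series batches) and check that arbitrary chaining still yields the expected sums.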
def test_mixed_udf(self):
import pandas as pd
from pyspark.sql.functions import col, udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed UDFs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
import pandas as pd
from pyspark.sql import Column
from pyspark.sql.functions import udf, pandas_udf
df = self.spark.range(0, 1).toDF('v')
# Test a mixture of plain UDFs, pandas UDFs and SQL expressions.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
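# Fixture: ids 0-9, each paired with values 20-29 via explode, giving 100 (id, v) rows.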
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id")))
# Different forms of grouped map pandas UDF; the results of these are the same
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType())),
StructField('v1', DoubleType()),
StructField('v2', LongType())])
udf1 = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_array_type_correct(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_register_grouped_map_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
'SQL_SCALAR_PANDAS_UDF'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
schema = StructType(
[StructField("id", LongType(), True),
StructField("map", MapType(StringType(), IntegerType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
schema = StructType(
[StructField("id", LongType(), True),
StructField("arr_ts", ArrayType(TimestampType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
def test_column_order(self):
from collections import OrderedDict
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id')
# Function returns a pdf with required column names, but order could be arbitrary using dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use from_items to ensure the pdf column order is different than schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "No cast implemented"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
with self.sql_conf({"spark.sql.execution.pandas.groupedMap.assignColumnsByPosition": True}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
import pyspark.sql.functions as F
@F.pandas_udf('key long, col string', F.PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
F.col('temp0.key') == F.col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
import pandas as pd
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
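        # Arithmetic check for the expected value above: v1 = 0..9 sums to 45,
        # v2 = v1 + 1 sums to 55, v3 = v1 + 2 sums to 65, and 45 + 55 + 65 = 165.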
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
from pyspark.sql.functions import pandas_udf, array
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
from pyspark.sql.types import DoubleType, MapType
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
from pyspark.sql.functions import mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
from pyspark.sql.functions import sum, mean
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
from pyspark.sql.functions import sum, mean
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF
        # in groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
from pyspark.sql.functions import lit, sum
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
from pyspark.sql.functions import col, sum
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
        # group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
from pyspark.sql.functions import sum, lit, col
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
def test_simple(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, percent_rank, mean, max
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
from pyspark.sql.functions import max, min, mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
from pyspark.sql.functions import max, min, rank, col
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean, pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
ow = self.ordered_window
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*Only unbounded window frame is supported.*'):
df.withColumn('mean_v', mean_udf(df['v']).over(ow))
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
| apache-2.0 |
lazywei/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
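# Hedged usage sketch for get_data_home: the resolved paths shown below are
# illustrative only and depend on the user's home directory and environment.
#
#   >>> get_data_home()                          # doctest: +SKIP
#   '/home/user/scikit_learn_data'
#   >>> get_data_home('~/my_sklearn_cache')      # doctest: +SKIP
#   '/home/user/my_sklearn_cache'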
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
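# Hedged usage sketch for load_files: the folder name and category names below
# are invented for illustration. With load_content=True and an encoding given,
# `data` holds decoded text and `target` holds each file's integer category
# label, indexed into `target_names` (the sorted subfolder names).
#
#   >>> reviews = load_files('reviews/', encoding='utf-8')     # doctest: +SKIP
#   >>> reviews.target_names                                   # doctest: +SKIP
#   ['neg', 'pos']
#   >>> len(reviews.data) == len(reviews.target)               # doctest: +SKIP
#   True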
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
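# Minimal usage sketch; the shape follows the summary table in the docstring
# above (442 samples, 10 features). Output formatting is indicative only.
#
#   >>> from sklearn.datasets import load_diabetes   # doctest: +SKIP
#   >>> diabetes = load_diabetes()                    # doctest: +SKIP
#   >>> diabetes.data.shape                           # doctest: +SKIP
#   (442, 10)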
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
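# Minimal usage sketch; shapes follow the summary above (20 samples, 3 exercise
# features and 3 physiological targets). Output formatting is indicative only.
#
#   >>> from sklearn.datasets import load_linnerud    # doctest: +SKIP
#   >>> linnerud = load_linnerud()                      # doctest: +SKIP
#   >>> linnerud.data.shape, linnerud.target.shape      # doctest: +SKIP
#   ((20, 3), (20, 3))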
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
nhejazi/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
homeslike/OpticalTweezer | scripts/data2/vCOMhist.py | 28 | 1192 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import sys
# import vCOMdata.dat as array
# folder="../output/runs/170123_2033/"
folder="../output/runs/"+str(sys.argv[1])
# when = str(sys.argv[1])
for i in range(0,len(sys.argv)):
print(str(i) + ": "+ str(sys.argv[i]))
# data = np.genfromtxt(folder+"/vCOMData.dat",usecols=(0,1,2), skip_header=100)
# vx = np.genfromtxt(folder+" "+str(sys.argv[1])+"/vCOMData.dat",usecols=0, skip_header=100)
vx = np.genfromtxt(folder+"/vCOMData.dat",usecols=0, skip_header=100)
vy = np.genfromtxt(folder+"/vCOMData.dat",usecols=1, skip_header=100)
vz = np.genfromtxt(folder+"/vCOMData.dat",usecols=2, skip_header=100)
# print(vx)
# vx = []
# for i in range(0,len(data)):
# vx.append(data[i][0])
# hist_vx = np.histogram(vx,bins=100)
# print(len(hist_vx[1]))
# print("")
# print(len(hist_vx[0]))
# hist_vy = np.histogram(vy,bins=100)
# hist_vz = np.histogram(vz,bins=100)
# printable = []
# for i in range(0,len(hist_vx[0])):
# printable.append((hist_vx[1][i],hist_vx[0][i]))
# print(hist_vx)
plt.hist(vx,bins=100)
# plt.hist(vy,bins=100)
# plt.hist(vz,bins=100)
# plt.plot(hist_vx[1],hist_vx[0])
plt.show()
| mit |
Srisai85/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
Jokiva/Computational-Physics | lecture 14/Problem_3.py | 1 | 2068 | # import packages
import numpy as np
import matplotlib.pyplot as plt
# the amount of shift up
verti_dev = 3
# the modified integrand
def f_prime(x):
return 2 * np.sin(2 * np.sqrt(np.pi ** 2 - x ** 2)) + verti_dev
if __name__ == '__main__':
# test block
"""
x = np.linspace(0, np.pi)
y = f_prime(x)
plt.plot(x, y)
plt.show()
"""
# random sampling
num_of_samp = 100000
x = np.random.uniform(0, np.pi, num_of_samp)
y = np.random.uniform(0, 6, num_of_samp)
# containers for different types of dots
above = []
below = []
    # count the successes
success = 0
for i in range(num_of_samp):
if y[i] < f_prime(x[i]):
below.append((x[i], y[i]))
success += 1
else:
above.append([x[i], y[i]])
# calculate the probability
p = success / num_of_samp
# the modified area
area = p * 6 * (np.pi - 0)
# the actual area
area = area - verti_dev * (np.pi - 0)
# print the integration result
print(area)
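    # Worked form of the estimate above (illustrative numbers only): the shifted
    # integrand 2*sin(2*sqrt(pi**2 - x**2)) + 3 lies within [1, 5] on [0, pi],
    # so the box [0, pi] x [0, 6] encloses it, and
    #     I_shifted ~= (success / num_of_samp) * 6 * pi
    #     I         ~= I_shifted - verti_dev * pi
    # e.g. a hit fraction p = 0.55 would give I ~= 0.55*6*pi - 3*pi ~= 0.94.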
    # some manipulation of the data
above = np.array(above)
below = np.array(below)
# the threshold of dots
thresh_of_dots = 3000
    # the annotation for the plot
anno = r'$\int_{0}^{\pi}{2 \sin (2 \sqrt{\pi^2 - x^2})} \approx$' + '${}$'.format(area)
plt.figure()
# plot the fucntion
x_spc = np.linspace(0, np.pi, num_of_samp)
plt.plot(x_spc, f_prime(x_spc) - verti_dev,
c='c',
linewidth=5)
# plot the dots
# plt.scatter(x, y)
# above
"""
for x_pos, y_pos in above:
plt.scatter(x_pos, y_pos - verti_dev, c='b')
"""
plt.scatter(above[:thresh_of_dots, 0], above[:thresh_of_dots, 1] - verti_dev, c='b', marker='+')
# below
"""
for x_pos, y_pos in below:
plt.scatter(x_pos, y_pos - verti_dev, c='r')
"""
plt.scatter(below[:thresh_of_dots, 0], below[:thresh_of_dots, 1] - verti_dev, c='r', marker='+')
plt.title(anno)
plt.grid()
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.show()
| gpl-3.0 |
cpcloud/ibis | ibis/config.py | 1 | 20553 | # This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re
import warnings
from collections import namedtuple
from contextlib import contextmanager
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb'
)
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError(
"Must provide an even number of non-keyword " "arguments"
)
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
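# Hedged usage sketch of the paired-argument convention enforced above; the
# option names are illustrative and would need to be registered through this
# module's option-registration machinery before such a call succeeds:
#
#   >>> _set_option('sql.default_limit', 10000,
#   ...             'interactive', True)            # doctest: +SKIP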
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = ''
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError(
'You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value'
)
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper:
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
return pprint.pformat(self.d)
def __setattr__(self, key, val):
prefix = self.prefix
if prefix:
prefix += "."
prefix += key
# you can't set new keys and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = self.prefix
if prefix:
prefix += "."
prefix += key
try:
v = self.d[key]
except KeyError as e:
raise AttributeError(*e.args)
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return sorted(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below, which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc:
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(
opts_desc=opts_desc, opts_list=opts_list
)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=True)
Prints the description for one or more registered options.
Call with no arguments to get a listing of all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in the ibis config API
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
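# --- Illustrative sketch (editor addition, not part of the original module) ---
# Shows how the pieces above fit together: register an option, then read and
# update it either through get_option/set_option or through attribute access
# on ``options``. The option name "demo.interactive" is a hypothetical example.
def _example_option_roundtrip():
    register_option("demo.interactive", False, doc="toggle interactive mode")
    assert get_option("demo.interactive") is False
    set_option("demo.interactive", True)
    assert options.demo.interactive is True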
#
# Functions for use by ibis developers, in addition to the user API
class option_context:
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('interactive', True):
... print(options.interactive)
True
>>> options.interactive
False
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
'Need to invoke as '
'option_context(pat, val, [(pat, val), ...]).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option-z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError(
"Path prefix to option '%s' is already an option"
% '.'.join(path[:i])
)
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError(
"Path prefix to option '%s' is already an option"
% '.'.join(path[:-1])
)
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(
key=key, defval=defval, doc=doc, validator=validator, cb=cb
)
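# --- Illustrative sketch (editor addition) ---
# Registering an option with a validator and a callback. The option name
# "demo.max_rows" and the callback are hypothetical; the ``is_int`` validator
# factory is defined near the end of this module.
def _example_register_with_validator():
    def _on_change(key):
        # called by _set_option after the value has been stored
        print("option %s changed to %r" % (key, get_option(key)))
    register_option(
        "demo.max_rows",
        100,
        doc="maximum number of rows to display",
        validator=is_int,
        cb=_on_change,
    )
    set_option("demo.max_rows", 50)  # accepted; triggers _on_change
    # set_option("demo.max_rows", "many")  # would raise ValueError via is_int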
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated. If code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
If `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that of `rkey` is checked. If they
do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. Must be a fully-qualified
option name (e.g. "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g. "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError(
"Option '%s' has already been defined as deprecated." % key
)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
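# --- Illustrative sketch (editor addition) ---
# Deprecating an option and re-routing access to its replacement. The option
# names below are hypothetical examples.
def _example_deprecate_and_reroute():
    register_option("demo.new_width", 80, doc="display width")
    register_option("demo.old_width", 80, doc="deprecated alias of demo.new_width")
    deprecate_option("demo.old_width", rkey="demo.new_width", removal_ver="0.2")
    set_option("demo.new_width", 120)
    # Reading the old key emits a DeprecationWarning and is transparently
    # redirected to "demo.new_width".
    assert get_option("demo.old_width") == 120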
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
if key is deprecated and a replacement key is defined, returns the
replacement key; otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
"""Builds a formatted description of a registered option and prints it."""
o = _get_registered_option(k)
d = _get_deprecated_option(k)
buf = ['{} '.format(k)]
if o.doc:
doc = '\n'.join(o.doc.strip().splitlines())
else:
doc = 'No description available.'
buf.append(doc)
if o:
buf.append(
'\n [default: {}] [currently: {}]'.format(
o.defval, _get_option(k, True)
)
)
if d:
buf.append(
'\n (Deprecated{})'.format(
', use `{}` instead.'.format(d.rkey) if d.rkey else ''
)
)
buf.append('\n\n')
return ''.join(buf)
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = '- ' + name + '.[' if name else ''
ls = wrap(
', '.join(ks),
width,
initial_indent=pfx,
subsequent_indent=' ',
break_long_words=False,
)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[: x.rfind('.')]):
ks = [x[len(k) + 1 :] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option("size", " 6 pt")
cf.get_option("size")
...
etc.
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x, which raises ValueError
if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x, which raises ValueError
if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError(
"Value must be one of %s" % str("|".join(pp_values))
)
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))
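# --- Illustrative sketch (editor addition) ---
# Using the validator factories above when registering options. The option
# names are hypothetical examples.
def _example_validators():
    register_option(
        "demo.mode",
        "eager",
        doc="execution mode",
        validator=is_one_of_factory(["eager", "lazy"]),
    )
    register_option("demo.timeout", 30.0, doc="timeout in seconds", validator=is_float)
    set_option("demo.mode", "lazy")  # accepted
    # set_option("demo.mode", "other")  # would raise ValueError (not a legal value)
    # set_option("demo.timeout", 30)  # would raise ValueError (int, not float)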
| apache-2.0 |
etkirsch/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
gmatteo/pymatgen | pymatgen/io/abinit/pseudos.py | 5 | 65306 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.
"""
import abc
import collections
import logging
import os
import sys
from collections import OrderedDict, defaultdict, namedtuple
import numpy as np
from monty.collections import AttrDict, Namespace
# from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MontyDecoder, MSONable
from monty.os.path import find_exts
from monty.string import is_string, list_strings
from tabulate import tabulate
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from pymatgen.util.serialization import pmg_serialize
logger = logging.getLogger(__name__)
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return "\n".join((traceback.format_exc(), str(sys.exc_info()[0])))
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, "r") as fh:
return fh.readlines()
lines = []
with open(filename, "r") as fh:
for lineno, line in enumerate(fh):
if lineno == nlines:
break
lines.append(line)
return lines
_l2str = {
0: "s",
1: "p",
2: "d",
3: "f",
4: "g",
5: "h",
6: "i",
}
_str2l = {v: k for k, v in _l2str.items()}
def l2str(l):
"""Convert the angular momentum l (int) to string."""
try:
return _l2str[l]
except KeyError:
return "Unknown angular momentum, received l = %s" % l
def str2l(s):
"""Convert a string to the angular momentum l (int)"""
return _str2l[s]
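# Quick sanity check of the mapping above (editor addition, illustrative only):
# l2str(2) returns "d" and str2l("d") returns 2.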
class Pseudo(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the methods that must be
implemented by the concrete pseudopotential sub-classes.
"""
@classmethod
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj)
@staticmethod
def from_file(filename):
"""
Build an instance of a concrete Pseudo subclass from filename.
Note: the parser knows the concrete class that should be instantiated
Client code should rely on the abstract interface provided by Pseudo.
"""
return PseudoParser().parse(filename)
def __eq__(self, other):
if other is None:
return False
return (
self.md5 == other.md5
and self.__class__ == other.__class__
and self.Z == other.Z
and self.Z_val == other.Z_val
and self.l_max == other.l_max
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
try:
return "<%s at %s>" % (
self.__class__.__name__,
os.path.relpath(self.filepath),
)
except Exception:
# relpath can fail if the code is executed in daemon mode.
return "<%s at %s>" % (self.__class__.__name__, self.filepath)
def __str__(self):
return self.to_string()
def to_string(self, verbose=0):
"""String representation."""
# pylint: disable=E1101
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines)
@property
@abc.abstractmethod
def summary(self):
"""String summarizing the most important properties."""
@property
def filepath(self):
"""Absolute path to pseudopotential file."""
# pylint: disable=E1101
return os.path.abspath(self.path)
@property
def basename(self):
"""File basename."""
# pylint: disable=E1101
return os.path.basename(self.filepath)
@property
@abc.abstractmethod
def Z(self):
"""The atomic number of the atom."""
@property
@abc.abstractmethod
def Z_val(self):
"""Valence charge."""
@property
def type(self):
"""Type of pseudo."""
return self.__class__.__name__
@property
def element(self):
"""Pymatgen :class:`Element`."""
try:
return Element.from_Z(self.Z)
except (KeyError, IndexError):
return Element.from_Z(int(self.Z))
@property
def symbol(self):
"""Element symbol."""
return self.element.symbol
@property
@abc.abstractmethod
def l_max(self):
"""Maximum angular momentum."""
@property
@abc.abstractmethod
def l_local(self):
"""Angular momentum used for the local part."""
@property
def isnc(self):
"""True if norm-conserving pseudopotential."""
return isinstance(self, NcPseudo)
@property
def ispaw(self):
"""True if PAW pseudopotential."""
return isinstance(self, PawPseudo)
@lazy_property
def md5(self):
"""MD5 hash value."""
# if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
return self.compute_md5()
def compute_md5(self):
"""Compute and erturn MD5 hash value."""
# pylint: disable=E1101
import hashlib
with open(self.path, "rt") as fh:
text = fh.read()
m = hashlib.md5(text.encode("utf-8"))
return m.hexdigest()
@property
@abc.abstractmethod
def supports_soc(self):
"""
True if the pseudo can be used in a calculation with spin-orbit coupling.
Base classes should provide a concrete implementation that computes this value.
"""
@pmg_serialize
def as_dict(self, **kwargs):
"""Return dictionary for MSONable protocol."""
# pylint: disable=E1101
return dict(
basename=self.basename,
type=self.type,
symbol=self.symbol,
Z=self.Z,
Z_val=self.Z_val,
l_max=self.l_max,
md5=self.md5,
filepath=self.filepath,
# xc=self.xc.as_dict(),
)
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
new = cls.from_file(d["filepath"])
# Consistency test based on md5
if "md5" in d and d["md5"] != new.md5:
raise ValueError(
"The md5 found in file does not agree with the one in dict\n"
"Received %s\nComputed %s" % (d["md5"], new.md5)
)
return new
def as_tmpfile(self, tmpdir=None):
"""
Copy the pseudopotential to a temporary file and return a new pseudopotential object.
Useful for unit tests in which we have to change the content of the file.
Args:
tmpdir: If None, a new temporary directory is created and files are copied here
else tmpdir is used.
"""
# pylint: disable=E1101
import shutil
import tempfile
tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
new_path = os.path.join(tmpdir, self.basename)
shutil.copy(self.filepath, new_path)
# Copy dojoreport file if present.
root, ext = os.path.splitext(self.filepath)
djrepo = root + ".djrepo"
if os.path.exists(djrepo):
shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
# Build new object and copy dojo_report if present.
new = self.__class__.from_file(new_path)
if self.has_dojo_report:
new.dojo_report = self.dojo_report.deepcopy()
return new
@property
def has_dojo_report(self):
"""True if the pseudo has an associated `DOJO_REPORT` section."""
# pylint: disable=E1101
return hasattr(self, "dojo_report") and bool(self.dojo_report)
@property
def djrepo_path(self):
"""The path of the djrepo file. None if file does not exist."""
# pylint: disable=E1101
root, ext = os.path.splitext(self.filepath)
path = root + ".djrepo"
return path
# if os.path.exists(path): return path
# return None
def hint_for_accuracy(self, accuracy="normal"):
"""
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
# pylint: disable=E1101
if not self.has_dojo_report:
return Hint(ecut=0.0, pawecutdg=0.0)
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if "hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["hints"][accuracy])
if "ppgen_hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
return Hint(ecut=0.0, pawecutdg=0.0)
@property
def has_hints(self):
"""
True if self provides hints on the cutoff energy.
"""
for acc in ["low", "normal", "high"]:
try:
if self.hint_for_accuracy(acc) is None:
return False
except KeyError:
return False
return True
def open_pspsfile(self, ecut=20, pawecutdg=None):
"""
Calls Abinit to compute the internal tables for the application of the
pseudopotential part. Returns :class:`PspsFile` object providing methods
to plot and analyze the data or None if file is not found or it's not readable.
Args:
ecut: Cutoff energy in Hartree.
pawecutdg: Cutoff energy for the PAW double grid.
"""
from abipy.abio.factories import gs_input
from abipy.core.structure import Structure
from abipy.electrons.psps import PspsFile
from abipy.flowtk import AbinitTask
# Build fake structure.
lattice = 10 * np.eye(3)
structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
if self.ispaw and pawecutdg is None:
pawecutdg = ecut * 4
inp = gs_input(
structure,
pseudos=[self],
ecut=ecut,
pawecutdg=pawecutdg,
spin_mode="unpolarized",
kppa=1,
)
# Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
inp["prtpsps"] = -1
# Build temporary task and run it (ignore retcode because we don't exit cleanly)
task = AbinitTask.temp_shell_task(inp)
task.start_and_wait()
filepath = task.outdir.has_abiext("_PSPS.nc")
if not filepath:
logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
return None
# Open the PSPS.nc file.
try:
return PspsFile(filepath)
except Exception as exc:
logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
return None
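# --- Illustrative sketch (editor addition) ---
# Typical read-only usage of the Pseudo interface. The file path below is a
# hypothetical placeholder for a real pseudopotential file on disk.
def _example_inspect_pseudo():
    pseudo = Pseudo.from_file("/path/to/Si.psp8")  # hypothetical path
    print(pseudo.symbol, pseudo.Z, pseudo.Z_val)   # element symbol, Z, valence charge
    print(pseudo.isnc, pseudo.ispaw)               # norm-conserving vs PAW
    if pseudo.has_hints:
        print(pseudo.hint_for_accuracy("normal"))  # suggested ecut/pawecutdg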
class NcPseudo(metaclass=abc.ABCMeta):
"""
Abstract class defining the methods that must be implemented
by the concrete classes representing norm-conserving pseudopotentials.
"""
@property
@abc.abstractmethod
def nlcc_radius(self):
"""
Radius at which the core charge vanishes (i.e. cut-off in a.u.).
Returns 0.0 if nlcc is not used.
"""
@property
def has_nlcc(self):
"""True if the pseudo is generated with non-linear core correction."""
return self.nlcc_radius > 0.0
@property
def rcore(self):
"""Radius of the pseudization sphere in a.u."""
try:
return self._core
except AttributeError:
return None
class PawPseudo(metaclass=abc.ABCMeta):
"""
Abstract class that defines the methods that must be implemented
by the concrete classes representing PAW pseudopotentials.
"""
# def nlcc_radius(self):
# """
# Radius at which the core charge vanish (i.e. cut-off in a.u.).
# Returns 0.0 if nlcc is not used.
# """
# return 0.0
#
# @property
# def has_nlcc(self):
# """True if the pseudo is generated with non-linear core correction."""
# return True
@property
@abc.abstractmethod
def paw_radius(self):
"""Radius of the PAW sphere in a.u."""
@property
def rcore(self):
"""Alias of paw_radius."""
return self.paw_radius
class AbinitPseudo(Pseudo):
"""
An AbinitPseudo is a pseudopotential whose file contains an abinit header.
"""
def __init__(self, path, header):
"""
Args:
path: Filename.
header: :class:`AbinitHeader` instance.
"""
self.path = path
self.header = header
self._summary = header.summary
# Build xc from header.
self.xc = XcFunc.from_abinit_ixc(header["pspxc"])
for attr_name, desc in header.items():
value = header.get(attr_name, None)
# Hide these attributes since one should always use the public interface.
setattr(self, "_" + attr_name, value)
@property
def summary(self):
"""Summary line reported in the ABINIT header."""
return self._summary.strip()
@property
def Z(self):
# pylint: disable=E1101
return self._zatom
@property
def Z_val(self):
# pylint: disable=E1101
return self._zion
@property
def l_max(self):
# pylint: disable=E1101
return self._lmax
@property
def l_local(self):
# pylint: disable=E1101
return self._lloc
@property
def supports_soc(self):
# Treat ONCVPSP pseudos
# pylint: disable=E1101
if self._pspcod == 8:
switch = self.header["extension_switch"]
if switch in (0, 1):
return False
if switch in (2, 3):
return True
raise ValueError("Don't know how to handle extension_switch: %s" % switch)
# TODO Treat HGH HGHK pseudos
# As far as I know, other Abinit pseudos do not support SOC.
return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
"""Norm-conserving pseudopotential in the Abinit format."""
@property
def summary(self):
return self._summary.strip()
@property
def Z(self):
# pylint: disable=E1101
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
# pylint: disable=E1101
return self._zion
@property
def l_max(self):
# pylint: disable=E1101
return self._lmax
@property
def l_local(self):
# pylint: disable=E1101
return self._lloc
@property
def nlcc_radius(self):
# pylint: disable=E1101
return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
"""Paw pseudopotential in the Abinit format."""
@property
def paw_radius(self):
# pylint: disable=E1101
return self._r_cut
# def orbitals(self):
@property
def supports_soc(self):
return True
class Hint:
"""
Suggested value for the cutoff energy [Hartree units]
and the cutoff energy for the dense grid (only for PAW pseudos).
"""
def __init__(self, ecut, pawecutdg=None):
self.ecut = ecut
self.pawecutdg = ecut if pawecutdg is None else pawecutdg
def __str__(self):
if self.pawecutdg is not None:
return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)
return "ecut: %s" % (self.ecut)
@pmg_serialize
def as_dict(self):
"""Return dictionary for MSONable protocol."""
return dict(ecut=self.ecut, pawecutdg=self.pawecutdg)
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
return cls(**{k: v for k, v in d.items() if not k.startswith("@")})
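# --- Illustrative sketch (editor addition) ---
# Hint objects round-trip through the MSONable dict protocol; values are in
# Hartree, as stated in the class docstring.
def _example_hint_roundtrip():
    hint = Hint(ecut=35.0, pawecutdg=70.0)
    same = Hint.from_dict(hint.as_dict())
    assert (same.ecut, same.pawecutdg) == (35.0, 70.0)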
def _dict_from_lines(lines, key_nums, sep=None):
"""
Helper function to parse formatted text structured like:
value1 value2 ... sep key1, key2 ...
key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
sep is a string denoting the character that separates the keys from the value (None if
no separator is present).
Returns:
dict{key1 : value1, key2 : value2, ...}
Raises:
ValueError if parsing fails.
"""
if is_string(lines):
lines = [lines]
if not isinstance(key_nums, collections.abc.Iterable):
key_nums = list(key_nums)
if len(lines) != len(key_nums):
err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
raise ValueError(err_msg)
kwargs = Namespace()
for (i, nk) in enumerate(key_nums):
if nk == 0:
continue
line = lines[i]
tokens = [t.strip() for t in line.split()]
values, keys = tokens[:nk], "".join(tokens[nk:])
# Sanitize keys: in some cases we might get strings in the form: foo[,bar]
keys = keys.replace("[", "").replace("]", "")
keys = keys.split(",")
if sep is not None:
check = keys[0][0]
if check != sep:
raise ValueError("Expecting separator %s, got %s" % (sep, check))
keys[0] = keys[0][1:]
if len(values) != len(keys):
msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (
line,
keys,
values,
)
raise ValueError(msg)
kwargs.update(zip(keys, values))
return kwargs
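# --- Illustrative sketch (editor addition) ---
# How _dict_from_lines maps a typical abinit header record to a dict. The
# sample line is taken from the FHI header documented below; values are kept
# as strings and converted later by the header classes.
def _example_dict_from_lines():
    line = "21.00000   3.00000    940714    zatom, zion, pspdat"
    d = _dict_from_lines([line], [3])
    assert d["zatom"] == "21.00000" and d["pspdat"] == "940714"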
class AbinitHeader(dict):
"""Dictionary whose keys can be also accessed as attributes."""
def __getattr__(self, name):
try:
# Default behaviour
return super().__getattribute__(name)
except AttributeError:
try:
# Try in the dictionary.
return self[name]
except KeyError as exc:
raise AttributeError(str(exc))
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num
class NcAbinitHeader(AbinitHeader):
"""The abinit header found in the NC pseudopotential files."""
_attr_desc = namedtuple("_attr_desc", "default astype")
_VARS = {
# Mandatory
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"mmax": _attr_desc(None, float),
# Optional variables for non linear-core correction. HGH does not have it.
"rchrg": _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
"fchrg": _attr_desc(0.0, float),
"qchrg": _attr_desc(0.0, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super().__init__()
# pseudos generated by APE use llocal instead of lloc.
if "llocal" in kwargs:
kwargs["lloc"] = kwargs.pop("llocal")
self.summary = summary.strip()
for key, desc in NcAbinitHeader._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except Exception:
raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
self[key] = value
# Add remaining arguments, e.g. extension_switch
if kwargs:
self.update(kwargs)
@staticmethod
def fhi_header(filename, ppdesc):
"""
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, 4)
try:
header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
except ValueError:
# The last record with rchrg ... seems to be optional.
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def hgh_header(filename, ppdesc):
"""
Parse the HGH abinit header. Example:
Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
10 8 010605 zatom,zion,pspdat
3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
"""
lines = _read_nlines(filename, 3)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def gth_header(filename, ppdesc):
"""
Parse the GTH abinit header. Example:
Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
1 1 960508 zatom,zion,pspdat
2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
0 0 0 rs, h1s, h2s
0 0 rp, h1p
1.36 .2 0.6 rcutoff, rloc
"""
lines = _read_nlines(filename, 7)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def oncvpsp_header(filename, ppdesc):
"""
Parse the ONCVPSP abinit header. Example:
Li ONCVPSP r_core= 2.01 3.02
3.0000 3.0000 140504 zatom,zion,pspd
8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
2 2 0 0 0 nproj
0 extension_switch
0 -2.5000025868368D+00 -1.2006906995331D+00
1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
"""
lines = _read_nlines(filename, 6)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
# Replace pspd with pspdat
header.update({"pspdat": header["pspd"]})
header.pop("pspd")
# Read extension switch
header["extension_switch"] = int(lines[5].split()[0])
return NcAbinitHeader(summary, **header)
@staticmethod
def tm_header(filename, ppdesc):
"""
Parse the TM abinit header. Example:
Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
100.00000 14.00000 940714 zatom, zion, pspdat
1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, -1)
header = []
for lineno, line in enumerate(lines):
header.append(line)
if lineno == 2:
# Read lmax.
tokens = line.split()
pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
mmax, r2well = map(float, tokens[4:6])
# if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
# raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
lines = lines[3:]
break
# TODO
# Parse the section with the projectors.
# 0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
# .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
projectors = OrderedDict()
for idx in range(2 * (lmax + 1)):
line = lines[idx]
if idx % 2 == 0:
proj_info = [
line,
]
if idx % 2 == 1:
proj_info.append(line)
d = _dict_from_lines(proj_info, [5, 4])
projectors[int(d["l"])] = d
# Add the last line with info on nlcc.
header.append(lines[idx + 1])
summary = header[0]
header = _dict_from_lines(header, [0, 3, 6, 3])
return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
"""The abinit header found in the PAW pseudopotential files."""
_attr_desc = namedtuple("_attr_desc", "default astype")
_VARS = {
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"mmax": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"pspfmt": _attr_desc(None, str),
"creatorID": _attr_desc(None, int),
"basis_size": _attr_desc(None, int),
"lmn_size": _attr_desc(None, int),
"orbitals": _attr_desc(None, list),
"number_of_meshes": _attr_desc(None, int),
"r_cut": _attr_desc(None, float), # r_cut(PAW) in the header
"shape_type": _attr_desc(None, int),
"rshape": _attr_desc(None, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super().__init__()
self.summary = summary.strip()
for key, desc in self._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except Exception:
raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
self[key] = value
if kwargs:
raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
@staticmethod
def paw_header(filename, ppdesc):
"""
Parse the PAW abinit header. Examples:
Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
28.000 18.000 20061204 : zatom,zion,pspdat
7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw3 1305 : pspfmt,creatorID
5 13 : basis_size,lmn_size
0 0 1 1 2 : orbitals
3 : number_of_meshes
1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
2.3000000000 : r_cut(SPH)
2 0.
Another format:
C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
6.000 4.000 20090106 : zatom,zion,pspdat
7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw4 2230 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
1.5550009124 : r_cut(PAW)
3 0. : shape_type,rshape
Yet another one:
Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
14.000 4.000 20120814 : zatom,zion,pspdat
7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw5 1331 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
1.5669671236 : r_cut(PAW)
2 0. : shape_type,rshape
"""
supported_formats = ["paw3", "paw4", "paw5"]
if ppdesc.format not in supported_formats:
raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
lines = _read_nlines(filename, -1)
summary = lines[0]
header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
lines = lines[5:]
# TODO
# Parse orbitals and number of meshes.
header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
# print filename, header
# Skip meshes =
lines = lines[2 + num_meshes :]
# for midx in range(num_meshes):
# l = midx + 1
# print lines[0]
header["r_cut"] = float(lines[0].split(":")[0])
# print lines[1]
header.update(_dict_from_lines(lines[1], [2], sep=":"))
# print("PAW header\n", header)
return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
"""Base Error class for the exceptions raised by :class:`PseudoParser`"""
class PseudoParser:
"""
Responsible for parsing pseudopotential files and returning pseudopotential objects.
Usage::
pseudo = PseudoParser().parse("filename")
"""
Error = PseudoParserError
# Supported values of pspcod
ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")
# TODO Recheck
_PSPCODES = OrderedDict(
{
1: ppdesc(1, "TM", "NC", None),
2: ppdesc(2, "GTH", "NC", None),
3: ppdesc(3, "HGH", "NC", None),
4: ppdesc(4, "Teter", "NC", None),
# 5: ppdesc(5, "NC", , None),
6: ppdesc(6, "FHI", "NC", None),
7: ppdesc(7, "PAW_abinit_text", "PAW", None),
8: ppdesc(8, "ONCVPSP", "NC", None),
10: ppdesc(10, "HGHK", "NC", None),
}
)
del ppdesc
# renumber functionals from oncvpsp. TODO: confirm that 3 is 2
# _FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
# 2: {'n': 5, 'name': 'HL'},
# 3: {'n': 2, 'name': 'PWCA'},
# 4: {'n': 11, 'name': 'PBE'}}
def __init__(self):
# List of files that have been parsed successfully.
self._parsed_paths = []
# List of files that could not be parsed.
self._wrong_paths = []
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
for i, ext in enumerate(exclude_exts):
if not ext.strip().startswith("."):
exclude_exts[i] = "." + ext.strip()
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if ext in exclude_exts or fname in exclude_fnames or fname.startswith(".") or not os.path.isfile(path):
continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
except Exception:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
self._parsed_paths.append(path)
else:
self._wrong_paths.append(path)
return pseudos
def read_ppdesc(self, filename):
"""
Read the pseudopotential descriptor from file filename.
Returns:
Pseudopotential descriptor. None if filename is not a valid pseudopotential file.
Raises:
`PseudoParserError` if fileformat is not supported.
"""
if filename.endswith(".xml"):
raise self.Error("XML pseudo not supported yet")
# Assume file with the abinit header.
lines = _read_nlines(filename, 80)
for lineno, line in enumerate(lines):
if lineno == 2:
try:
tokens = line.split()
pspcod, pspxc = map(int, tokens[:2])
except Exception:
msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (
filename,
line,
)
logger.critical(msg)
return None
# if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
# "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
if pspcod not in self._PSPCODES:
raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
ppdesc = self._PSPCODES[pspcod]
if pspcod == 7:
# PAW -> need to know the format pspfmt
tokens = lines[lineno + 1].split()
pspfmt, creatorID = tokens[:2]
# if tokens[-1].strip() != "pspfmt,creatorID":
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
ppdesc = ppdesc._replace(format=pspfmt)
return ppdesc
return None
def parse(self, filename):
"""
Read and parse a pseudopotential file. Main entry point for client code.
Returns:
pseudopotential object or None if filename is not a valid pseudopotential file.
"""
path = os.path.abspath(filename)
# Only PAW supports XML at present.
if filename.endswith(".xml"):
return PawXmlSetup(path)
ppdesc = self.read_ppdesc(path)
if ppdesc is None:
logger.critical("Cannot find ppdesc in %s" % path)
return None
psp_type = ppdesc.psp_type
parsers = {
"FHI": NcAbinitHeader.fhi_header,
"GTH": NcAbinitHeader.gth_header,
"TM": NcAbinitHeader.tm_header,
"Teter": NcAbinitHeader.tm_header,
"HGH": NcAbinitHeader.hgh_header,
"HGHK": NcAbinitHeader.hgh_header,
"ONCVPSP": NcAbinitHeader.oncvpsp_header,
"PAW_abinit_text": PawAbinitHeader.paw_header,
}
try:
header = parsers[ppdesc.name](path, ppdesc)
except Exception:
raise self.Error(path + ":\n" + straceback())
if psp_type == "NC":
pseudo = NcAbinitPseudo(path, header)
elif psp_type == "PAW":
pseudo = PawAbinitPseudo(path, header)
else:
raise NotImplementedError("psp_type not in [NC, PAW]")
return pseudo
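# --- Illustrative sketch (editor addition) ---
# The two entry points of PseudoParser: parsing a single file and scanning a
# directory. The paths below are hypothetical placeholders.
def _example_parse_pseudos():
    parser = PseudoParser()
    pseudo = parser.parse("/path/to/Si.psp8")               # one Pseudo object
    pseudos = parser.scan_directory("/path/to/pseudo_dir")  # list of Pseudo objects
    return pseudo, pseudos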
# TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
"""
Radial Function class.
"""
pass
class PawXmlSetup(Pseudo, PawPseudo):
"""
Setup class for PawXml.
"""
def __init__(self, filepath):
"""
:param filepath:
"""
# pylint: disable=E1101
self.path = os.path.abspath(filepath)
# Get the XML root (this trick is used so that the object is pickleable).
root = self.root
# Get the version of the XML format
self.paw_setup_version = root.get("version")
# Info on the atom.
atom_attrib = root.find("atom").attrib
# self._symbol = atom_attrib["symbol"]
self._zatom = int(float(atom_attrib["Z"]))
self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
# Build xc from header.
xc_info = root.find("xc_functional").attrib
self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
# Old XML files do not define this field!
# In this case we set the PAW radius to None.
# self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
# self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
pawr_element = root.find("PAW_radius")
self._paw_radius = None
if pawr_element is not None:
self._paw_radius = float(pawr_element.attrib["rpaw"])
# <valence_states>
# <state n="2" l="0" f="2" rc="1.10" e="-0.6766" id="N-2s"/>
# <state n="2" l="1" f="3" rc="1.10" e="-0.2660" id="N-2p"/>
# <state l="0" rc="1.10" e=" 0.3234" id="N-s1"/>
# <state l="1" rc="1.10" e=" 0.7340" id="N-p1"/>
# <state l="2" rc="1.10" e=" 0.0000" id="N-d1"/>
# </valence_states>
#
# The valence_states element contains several state elements.
# For this setup, the first two lines describe bound eigenstates
# with occupation numbers and principal quantum numbers.
# Notice, that the three additional unbound states should have no f and n attributes.
# In this way, we know that only the first two bound states (with f and n attributes)
# should be used for constructing an initial guess for the wave functions.
self.valence_states = OrderedDict()
for node in root.find("valence_states"):
attrib = AttrDict(node.attrib)
assert attrib.id not in self.valence_states
self.valence_states[attrib.id] = attrib
# print(self.valence_states)
# Parse the radial grids
self.rad_grids = {}
for node in root.findall("radial_grid"):
grid_params = node.attrib
gid = grid_params["id"]
assert gid not in self.rad_grids
self.rad_grids[gid] = self._eval_grid(grid_params)
def __getstate__(self):
"""
The returned state is pickled as the contents of the instance.
In this case we just remove the XML root element, since Element objects cannot be pickled.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
@lazy_property
def root(self):
"""
Root tree of XML.
"""
from xml.etree import cElementTree as Et
tree = Et.parse(self.filepath)
return tree.getroot()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self.valence
# FIXME
@property
def l_max(self):
"""Maximum angular momentum."""
return None
@property
def l_local(self):
"""Angular momentum used for the local part."""
return None
@property
def summary(self):
"""String summarizing the most important properties."""
return ""
@property
def paw_radius(self):
return self._paw_radius
@property
def supports_soc(self):
"""
Here I assume that the ab-initio code can treat the SOC within the on-site approximation
"""
return True
@staticmethod
def _eval_grid(grid_params):
"""
This function receives a dictionary with the parameters defining the
radial mesh and returns a `ndarray` with the mesh
"""
eq = grid_params.get("eq").replace(" ", "")
istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
indices = list(range(istart, iend + 1))
if eq == "r=a*exp(d*i)":
a, d = float(grid_params["a"]), float(grid_params["d"])
mesh = [a * np.exp(d * i) for i in indices]
elif eq == "r=a*i/(n-i)":
a, n = float(grid_params["a"]), float(grid_params["n"])
mesh = [a * i / (n - i) for i in indices]
elif eq == "r=a*(exp(d*i)-1)":
a, d = float(grid_params["a"]), float(grid_params["d"])
mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
elif eq == "r=d*i":
d = float(grid_params["d"])
mesh = [d * i for i in indices]
elif eq == "r=(i/n+a)^5/a-a^4":
a, n = float(grid_params["a"]), float(grid_params["n"])
mesh = [(i / n + a) ** 5 / a - a ** 4 for i in indices]
else:
raise ValueError("Unknown grid type: %s" % eq)
return np.array(mesh)
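# Illustrative note (editor addition): for example, a linear grid described by
# {"eq": "r=d*i", "istart": "0", "iend": "3", "d": "0.1"} evaluates to
# array([0.0, 0.1, 0.2, 0.3]).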
def _parse_radfunc(self, func_name):
"""Parse the first occurence of func_name in the XML file."""
# pylint: disable=E1101
node = self.root.find(func_name)
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
return self.rad_grids[grid], values, node.attrib
def _parse_all_radfuncs(self, func_name):
"""Parse all the nodes with tag func_name in the XML file."""
# pylint: disable=E1101
for node in self.root.findall(func_name):
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
yield self.rad_grids[grid], values, node.attrib
@lazy_property
def ae_core_density(self):
"""The all-electron radial density."""
mesh, values, attrib = self._parse_radfunc("ae_core_density")
return RadialFunction(mesh, values)
@lazy_property
def pseudo_core_density(self):
"""The pseudized radial density."""
mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
return RadialFunction(mesh, values)
@lazy_property
def ae_partial_waves(self):
"""Dictionary with the AE partial waves indexed by state."""
ae_partial_waves = OrderedDict()
for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
state = attrib["state"]
# val_state = self.valence_states[state]
ae_partial_waves[state] = RadialFunction(mesh, values)
return ae_partial_waves
@property
def pseudo_partial_waves(self):
"""Dictionary with the pseudo partial waves indexed by state."""
pseudo_partial_waves = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
state = attrib["state"]
# val_state = self.valence_states[state]
pseudo_partial_waves[state] = RadialFunction(mesh, values)
return pseudo_partial_waves
@lazy_property
def projector_functions(self):
"""Dictionary with the PAW projectors indexed by state."""
projector_functions = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
state = attrib["state"]
# val_state = self.valence_states[state]
projector_functions[state] = RadialFunction(mesh, values)
return projector_functions
def yield_figs(self, **kwargs): # pragma: no cover
"""
This function *generates* a predefined list of matplotlib figures with minimal input from the user.
"""
yield self.plot_densities(title="PAW densities", show=False)
yield self.plot_waves(title="PAW waves", show=False)
yield self.plot_projectors(title="PAW projectors", show=False)
# yield self.plot_potentials(title="potentials", show=False)
@add_fig_kwargs
def plot_densities(self, ax=None, **kwargs):
"""
Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
# ax.set_ylabel('density')
for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
rden = getattr(self, den_name)
label = "$n_c$" if i == 1 else r"$\tilde{n}_c$"
ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
ax.legend(loc="best")
return fig
@add_fig_kwargs
def plot_waves(self, ax=None, fontsize=12, **kwargs):
"""
Plot the AE and the pseudo partial waves.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: fontsize for legends and titles
Returns: `matplotlib` figure
"""
# pylint: disable=E1101
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel(r"$r\phi,\, r\tilde\phi\, [Bohr]^{-\frac{1}{2}}$")
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.pseudo_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
for state, rfunc in self.ae_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
@add_fig_kwargs
def plot_projectors(self, ax=None, fontsize=12, **kwargs):
"""
Plot the PAW projectors.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
# pylint: disable=E1101
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$")
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.projector_functions.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
# @add_fig_kwargs
# def plot_potentials(self, **kwargs):
# """
# ================ ==============================================================
# kwargs Meaning
# ================ ==============================================================
# title Title of the plot (Default: None).
# show True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps' to save the figure to a file.
# ================ ==============================================================
# Returns:
# `matplotlib` figure
# """
# title = kwargs.pop("title", "Potentials")
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.grid(True)
# ax.set_xlabel('r [Bohr]')
# ax.set_ylabel('density')
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
# for state, rfunc in self.potentials.items():
# ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
# ax.legend(loc="best")
# if title is not None: fig.suptitle(title)
# if show: plt.show()
# if savefig: fig.savefig(savefig)
# return fig
class PseudoTable(collections.abc.Sequence, MSONable, metaclass=abc.ABCMeta):
"""
Define the pseudopotentials from the element table.
Individual elements are accessed by name, symbol or atomic number.
For example, the following all retrieve iron:
print elements[26]
Fe
print elements.Fe
Fe
print elements.symbol('Fe')
Fe
print elements.name('iron')
Fe
print elements.isotope('Fe')
Fe
"""
@classmethod
def as_table(cls, items):
"""
Return an instance of :class:`PseudoTable` from the iterable items.
"""
if isinstance(items, cls):
return items
return cls(items)
@classmethod
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
"""
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
exts: List of file extensions. If exts == "all_files",
we try to open all files in top.
exclude_dirs: Wildcard used to exclude directories.
Returns: :class:`PseudoTable` sorted by atomic number Z.
"""
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info("Skipping file %s" % f)
except Exception:
logger.info("Skipping file %s" % f)
if not pseudos:
logger.warning("No pseudopotentials parsed from folder %s" % top)
return None
logger.info("Creating PseudoTable with %i pseudopotentials" % len(pseudos))
else:
if exts is None:
exts = ("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
def __init__(self, pseudos):
"""
Args:
pseudos: List of pseudopotentials or filepaths
"""
# Store pseudos in a default dictionary with z as key.
# Note that we can have more than one pseudo for a given z,
# hence the values are lists of pseudos.
if not isinstance(pseudos, collections.abc.Iterable):
pseudos = [pseudos]
if len(pseudos) and is_string(pseudos[0]):
pseudos = list_strings(pseudos)
self._pseudos_with_z = defaultdict(list)
for pseudo in pseudos:
if not isinstance(pseudo, Pseudo):
pseudo = Pseudo.from_file(pseudo)
if pseudo is not None:
self._pseudos_with_z[pseudo.Z].append(pseudo)
for z in self.zlist:
pseudo_list = self._pseudos_with_z[z]
symbols = [p.symbol for p in pseudo_list]
symbol = symbols[0]
if any(symb != symbol for symb in symbols):
raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
setattr(self, symbol, pseudo_list)
def __getitem__(self, Z):
"""
Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
"""
if isinstance(Z, slice):
assert Z.stop is not None
pseudos = []
for znum in iterator_from_slice(Z):
pseudos.extend(self._pseudos_with_z[znum])
return self.__class__(pseudos)
return self.__class__(self._pseudos_with_z[Z])
def __len__(self):
return len(list(self.__iter__()))
def __iter__(self):
"""Process the elements in Z order."""
for z in self.zlist:
for pseudo in self._pseudos_with_z[z]:
yield pseudo
def __repr__(self):
return "<%s at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return self.to_table()
@property
def allnc(self):
"""True if all pseudos are norm-conserving."""
return all(p.isnc for p in self)
@property
def allpaw(self):
"""True if all pseudos are PAW."""
return all(p.ispaw for p in self)
@property
def zlist(self):
"""Ordered list with the atomic numbers available in the table."""
return sorted(list(self._pseudos_with_z.keys()))
# def max_ecut_pawecutdg(self, accuracy):
# """Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
# ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
# pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
# return ecut, pawecutdg
def as_dict(self, **kwargs):
"""Return dictionary for MSONable protocol."""
d = {}
for p in self:
k, count = p.element.name, 1
# k, count = p.element, 1
# Handle multiple-pseudos with the same name!
while k in d:
k += k.split("#")[0] + "#" + str(count)
count += 1
d.update({k: p.as_dict()})
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
pseudos = []
dec = MontyDecoder()
for k, v in d.items():
if not k.startswith("@"):
pseudos.append(dec.process_decoded(v))
return cls(pseudos)
def is_complete(self, zmax=118):
"""
True if the table is complete, i.e., all elements with Z < zmax have at least one pseudopotential.
"""
for z in range(1, zmax):
if not self[z]:
return False
return True
def all_combinations_for_elements(self, element_symbols):
"""
Return a list with all the possible combinations of pseudos
for the given list of element_symbols.
Each item is a list of pseudopotential objects.
Example::
table.all_combinations_for_elements(["Li", "F"])
"""
d = OrderedDict()
for symbol in element_symbols:
d[symbol] = self.select_symbols(symbol, ret_list=True)
from itertools import product
return list(product(*d.values()))
def pseudo_with_symbol(self, symbol, allow_multi=False):
"""
Return the pseudo with the given chemical symbol.
Args:
symbol: String with the chemical symbol of the element
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
ValueError if symbol is not found, or if multiple occurrences are present and allow_multi is False.
"""
pseudos = self.select_symbols(symbol, ret_list=True)
if not pseudos or (len(pseudos) > 1 and not allow_multi):
raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
if not allow_multi:
return pseudos[0]
return pseudos
def pseudos_with_symbols(self, symbols):
"""
Return the pseudos with the given chemical symbols.
Raises:
ValueError if one of the symbols is not found or multiple occurrences are present.
"""
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
if duplicated_elements:
raise ValueError("Found multiple occurrences of symbol(s) %s" % ", ".join(duplicated_elements))
missing_symbols = [s for s in symbols if s not in found_symbols]
if missing_symbols:
raise ValueError("Missing data for symbol(s) %s" % ", ".join(missing_symbols))
return pseudos
def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
symbols = [s[1:] for s in symbols]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols:
continue
else:
if p.symbol not in symbols:
continue
pseudos.append(p)
if ret_list:
return pseudos
return self.__class__(pseudos)
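# Quick illustration of the selection convention documented above (symbols are
# hypothetical examples): select_symbols(["Si", "O"]) keeps only the Si and O
# pseudos, while select_symbols(["-Fe"]) keeps every pseudo except Fe.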
def get_pseudos_for_structure(self, structure):
"""
Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.
Args:
structure: pymatgen :class:`Structure`.
Raises:
`ValueError` if one of the chemical symbols is not found or
multiple occurrences are present in the table.
"""
return self.pseudos_with_symbols(structure.symbol_set)
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that takes a Pseudo as input and returns a boolean:
pseudos for which it returns True are excluded from the table. For example,
setting filter_function = lambda p: p.Z_val > 2 drops all pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream)
def to_table(self, filter_function=None):
"""Return string with data in tabular form."""
table = []
for p in self:
if filter_function is not None and filter_function(p):
continue
table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
return tabulate(
table,
headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
tablefmt="grid",
)
def sorted(self, attrname, reverse=False):
"""
Sort the table according to the value of attribute attrname.
Returns:
New :class:`PseudoTable` object
"""
attrs = []
for i, pseudo in enumerate(self):
try:
a = getattr(pseudo, attrname)
except AttributeError:
a = np.inf
attrs.append((i, a))
# Sort attrs, and build new table with sorted pseudos.
return self.__class__([self[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)])
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z))
def select(self, condition):
"""
Select only those pseudopotentials for which condition is True.
Return a new :class:`PseudoTable` object.
Args:
condition:
Function that accepts a :class:`Pseudo` object and returns True or False.
"""
return self.__class__([p for p in self if condition(p)])
def with_dojo_report(self):
Select pseudos containing the DOJO_REPORT section. Return a new :class:`PseudoTable` object.
return self.select(condition=lambda p: p.has_dojo_report)
def select_rows(self, rows):
"""
Return a new :class:`PseudoTable` object with pseudos in the given rows of the periodic table.
rows can be either a int or a list of integers.
"""
if not isinstance(rows, (list, tuple)):
rows = [rows]
return self.__class__([p for p in self if p.element.row in rows])
def select_family(self, family):
"""
Return a PseudoTable with elements belonging to the specified family, e.g. family="alkaline".
"""
# e.g element.is_alkaline
return self.__class__([p for p in self if getattr(p.element, "is_" + family)])
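# Minimal usage sketch of PseudoTable (illustrative; the directory path and the
# element symbols below are assumptions, not values defined in this module):
#   table = PseudoTable.from_dir("/path/to/pseudos", exts=("psp8",))
#   si = table.pseudo_with_symbol("Si")          # single Pseudo object
#   subset = table.select_symbols(["Si", "O"])   # new PseudoTable
#   print(table.to_table())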
| mit |
monkeypants/MAVProxy | MAVProxy/modules/lib/wxhorizon_ui.py | 1 | 32242 | import time
from MAVProxy.modules.lib.wxhorizon_util import Attitude, VFR_HUD, Global_Position_INT, BatteryInfo, FlightState, WaypointInfo, FPS
from MAVProxy.modules.lib.wx_loader import wx
import math, time
import matplotlib
matplotlib.use('wxAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import Polygon
import matplotlib.patheffects as PathEffects
from matplotlib import patches
import matplotlib as mpl
class HorizonFrame(wx.Frame):
""" The main frame of the horizon indicator."""
def __init__(self, state, title):
self.state = state
# Create Frame and Panel(s)
wx.Frame.__init__(self, None, title=title)
state.frame = self
# Initialisation
self.initData()
self.initUI()
self.startTime = time.time()
self.nextTime = 0.0
self.fps = 10.0
def initData(self):
# Initialise Attitude
self.pitch = 0.0 # Degrees
self.roll = 0.0 # Degrees
self.yaw = 0.0 # Degrees
# History Values
self.oldRoll = 0.0 # Degrees
# Initialise Rate Information
self.airspeed = 0.0 # m/s
self.relAlt = 0.0 # m relative to home position
self.relAltTime = 0.0 # s The time that the relative altitude was recorded
self.climbRate = 0.0 # m/s
self.altHist = [] # Altitude History
self.timeHist = [] # Time History
self.altMax = 0.0 # Maximum altitude since startup
# Initialise HUD Info
self.heading = 0.0 # 0-360
# Initialise Battery Info
self.voltage = 0.0
self.current = 0.0
self.batRemain = 0.0
# Initialise Mode and State
self.mode = 'UNKNOWN'
self.armed = ''
self.safetySwitch = ''
# Intialise Waypoint Information
self.currentWP = 0
self.finalWP = 0
self.wpDist = 0
self.nextWPTime = 0
self.wpBearing = 0
def initUI(self):
# Create Event Timer and Bindings
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.timer.Start(100)
self.Bind(wx.EVT_IDLE, self.on_idle)
self.Bind(wx.EVT_CHAR_HOOK,self.on_KeyPress)
# Create Panel
self.panel = wx.Panel(self)
self.vertSize = 0.09
self.resized = False
# Create Matplotlib Panel
self.createPlotPanel()
# Fix Axes - vertical is of length 2, horizontal keeps the same lengthscale
self.rescaleX()
self.calcFontScaling()
# Create Horizon Polygons
self.createHorizonPolygons()
# Center Pointer Marker
self.thick = 0.015
self.createCenterPointMarker()
# Pitch Markers
self.dist10deg = 0.2 # Graph distance per 10 deg
self.createPitchMarkers()
# Add Roll, Pitch, Yaw Text
self.createRPYText()
# Add Airspeed, Altitude, Climb Rate Text
self.createAARText()
# Create Heading Pointer
self.createHeadingPointer()
# Create North Pointer
self.createNorthPointer()
# Create Battery Bar
self.batWidth = 0.1
self.batHeight = 0.2
self.rOffset = 0.35
self.createBatteryBar()
# Create Mode & State Text
self.createStateText()
# Create Waypoint Text
self.createWPText()
# Create Waypoint Pointer
self.createWPPointer()
# Create Altitude History Plot
self.createAltHistoryPlot()
# Show Frame
self.Show(True)
self.pending = []
def createPlotPanel(self):
'''Creates the figure and axes for the plotting panel.'''
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self,-1,self.figure)
self.canvas.SetSize(wx.Size(300,300))
self.axes.axis('off')
self.figure.subplots_adjust(left=0,right=1,top=1,bottom=0)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas,1,wx.EXPAND,wx.ALL)
self.SetSizerAndFit(self.sizer)
self.Fit()
def rescaleX(self):
'''Rescales the horizontal axes to make the lengthscales equal.'''
self.ratio = self.figure.get_size_inches()[0]/float(self.figure.get_size_inches()[1])
self.axes.set_xlim(-self.ratio,self.ratio)
self.axes.set_ylim(-1,1)
def calcFontScaling(self):
'''Calculates the current font size and left position for the current window.'''
self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi
self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi
self.fontSize = self.vertSize*(self.ypx/2.0)
self.leftPos = self.axes.get_xlim()[0]
self.rightPos = self.axes.get_xlim()[1]
def checkResize(self):
'''Checks if the window was resized.'''
if not self.resized:
oldypx = self.ypx
oldxpx = self.xpx
self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi
self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi
if (oldypx != self.ypx) or (oldxpx != self.xpx):
self.resized = True
else:
self.resized = False
def createHeadingPointer(self):
'''Creates the pointer for the current heading.'''
self.headingTri = patches.RegularPolygon((0.0,0.80),3,0.05,color='k',zorder=4)
self.axes.add_patch(self.headingTri)
self.headingText = self.axes.text(0.0,0.675,'0',color='k',size=self.fontSize,horizontalalignment='center',verticalalignment='center',zorder=4)
def adjustHeadingPointer(self):
'''Adjust the value of the heading pointer.'''
self.headingText.set_text(str(self.heading))
self.headingText.set_size(self.fontSize)
def createNorthPointer(self):
'''Creates the north pointer relative to current heading.'''
self.headingNorthTri = patches.RegularPolygon((0.0,0.80),3,0.05,color='k',zorder=4)
self.axes.add_patch(self.headingNorthTri)
self.headingNorthText = self.axes.text(0.0,0.675,'N',color='k',size=self.fontSize,horizontalalignment='center',verticalalignment='center',zorder=4)
def adjustNorthPointer(self):
'''Adjust the position and orientation of
the north pointer.'''
self.headingNorthText.set_size(self.fontSize)
headingRotate = mpl.transforms.Affine2D().rotate_deg_around(0.0,0.0,self.heading)+self.axes.transData
self.headingNorthText.set_transform(headingRotate)
if (self.heading > 90) and (self.heading < 270):
headRot = self.heading-180
else:
headRot = self.heading
self.headingNorthText.set_rotation(headRot)
self.headingNorthTri.set_transform(headingRotate)
# Adjust if overlapping with heading pointer
if (self.heading <= 10.0) or (self.heading >= 350.0):
self.headingNorthText.set_text('')
else:
self.headingNorthText.set_text('N')
def toggleWidgets(self,widgets):
'''Hides/shows the given widgets.'''
for wig in widgets:
if wig.get_visible():
wig.set_visible(False)
else:
wig.set_visible(True)
def createRPYText(self):
'''Creates the text for roll, pitch and yaw.'''
self.rollText = self.axes.text(self.leftPos+(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0),'Roll: %.2f' % self.roll,color='w',size=self.fontSize)
self.pitchText = self.axes.text(self.leftPos+(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0),'Pitch: %.2f' % self.pitch,color='w',size=self.fontSize)
self.yawText = self.axes.text(self.leftPos+(self.vertSize/10.0),-0.97,'Yaw: %.2f' % self.yaw,color='w',size=self.fontSize)
self.rollText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.pitchText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.yawText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
def updateRPYLocations(self):
'''Update the locations of roll, pitch, yaw text.'''
# Locations
self.rollText.set_position((self.leftPos+(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0)))
self.pitchText.set_position((self.leftPos+(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0)))
self.yawText.set_position((self.leftPos+(self.vertSize/10.0),-0.97))
# Font Size
self.rollText.set_size(self.fontSize)
self.pitchText.set_size(self.fontSize)
self.yawText.set_size(self.fontSize)
def updateRPYText(self):
'Updates the displayed Roll, Pitch, Yaw Text'
self.rollText.set_text('Roll: %.2f' % self.roll)
self.pitchText.set_text('Pitch: %.2f' % self.pitch)
self.yawText.set_text('Yaw: %.2f' % self.yaw)
def createCenterPointMarker(self):
'''Creates the center pointer in the middle of the screen.'''
self.axes.add_patch(patches.Rectangle((-0.75,-self.thick),0.5,2.0*self.thick,facecolor='orange',zorder=3))
self.axes.add_patch(patches.Rectangle((0.25,-self.thick),0.5,2.0*self.thick,facecolor='orange',zorder=3))
self.axes.add_patch(patches.Circle((0,0),radius=self.thick,facecolor='orange',edgecolor='none',zorder=3))
def createHorizonPolygons(self):
'''Creates the two polygons to show the sky and ground.'''
# Sky Polygon
vertsTop = [[-1,0],[-1,1],[1,1],[1,0],[-1,0]]
self.topPolygon = Polygon(vertsTop,facecolor='dodgerblue',edgecolor='none')
self.axes.add_patch(self.topPolygon)
# Ground Polygon
vertsBot = [[-1,0],[-1,-1],[1,-1],[1,0],[-1,0]]
self.botPolygon = Polygon(vertsBot,facecolor='brown',edgecolor='none')
self.axes.add_patch(self.botPolygon)
def calcHorizonPoints(self):
'''Updates the verticies of the patches for the ground and sky.'''
ydiff = math.tan(math.radians(-self.roll))*float(self.ratio)
pitchdiff = self.dist10deg*(self.pitch/10.0)
# Sky Polygon
vertsTop = [(-self.ratio,ydiff-pitchdiff),(-self.ratio,1),(self.ratio,1),(self.ratio,-ydiff-pitchdiff),(-self.ratio,ydiff-pitchdiff)]
self.topPolygon.set_xy(vertsTop)
# Ground Polygon
vertsBot = [(-self.ratio,ydiff-pitchdiff),(-self.ratio,-1),(self.ratio,-1),(self.ratio,-ydiff-pitchdiff),(-self.ratio,ydiff-pitchdiff)]
self.botPolygon.set_xy(vertsBot)
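# Worked example for the geometry above (illustrative numbers): with roll = 10 deg,
# pitch = 5 deg and ratio = 1.5, ydiff = tan(-10 deg)*1.5 ~ -0.26 and
# pitchdiff = 0.2*(5/10) = 0.1, so the horizon edge sits at ydiff-pitchdiff ~ -0.36
# on the left and -ydiff-pitchdiff ~ 0.16 on the right: the sky/ground boundary
# tilts with roll and shifts up/down with pitch.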
def createPitchMarkers(self):
'''Creates the rectangle patches for the pitch indicators.'''
self.pitchPatches = []
# Major Lines (multiple of 10 deg)
for i in [-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9]:
width = self.calcPitchMarkerWidth(i)
currPatch = patches.Rectangle((-width/2.0,self.dist10deg*i-(self.thick/2.0)),width,self.thick,facecolor='w',edgecolor='none')
self.axes.add_patch(currPatch)
self.pitchPatches.append(currPatch)
# Add labels at +-30, +-60, +-90 deg
self.vertSize = 0.09
self.pitchLabelsLeft = []
self.pitchLabelsRight = []
i=0
for j in [-90,-60,-30,30,60,90]:
self.pitchLabelsLeft.append(self.axes.text(-0.55,(j/10.0)*self.dist10deg,str(j),color='w',size=self.fontSize,horizontalalignment='center',verticalalignment='center'))
self.pitchLabelsLeft[i].set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.pitchLabelsRight.append(self.axes.text(0.55,(j/10.0)*self.dist10deg,str(j),color='w',size=self.fontSize,horizontalalignment='center',verticalalignment='center'))
self.pitchLabelsRight[i].set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
i += 1
def calcPitchMarkerWidth(self,i):
'''Calculates the width of a pitch marker.'''
if (i % 3) == 0:
if i == 0:
width = 1.5
else:
width = 0.9
else:
width = 0.6
return width
def adjustPitchmarkers(self):
'''Adjusts the location and orientation of pitch markers.'''
pitchdiff = self.dist10deg*(self.pitch/10.0)
rollRotate = mpl.transforms.Affine2D().rotate_deg_around(0.0,-pitchdiff,self.roll)+self.axes.transData
j=0
for i in [-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9]:
width = self.calcPitchMarkerWidth(i)
self.pitchPatches[j].set_xy((-width/2.0,self.dist10deg*i-(self.thick/2.0)-pitchdiff))
self.pitchPatches[j].set_transform(rollRotate)
j+=1
# Adjust Text Size and rotation
i=0
for j in [-9,-6,-3,3,6,9]:
self.pitchLabelsLeft[i].set_y(j*self.dist10deg-pitchdiff)
self.pitchLabelsRight[i].set_y(j*self.dist10deg-pitchdiff)
self.pitchLabelsLeft[i].set_size(self.fontSize)
self.pitchLabelsRight[i].set_size(self.fontSize)
self.pitchLabelsLeft[i].set_rotation(self.roll)
self.pitchLabelsRight[i].set_rotation(self.roll)
self.pitchLabelsLeft[i].set_transform(rollRotate)
self.pitchLabelsRight[i].set_transform(rollRotate)
i += 1
def createAARText(self):
'''Creates the text for airspeed, altitude and climb rate.'''
self.airspeedText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0),'AS: %.1f m/s' % self.airspeed,color='w',size=self.fontSize,ha='right')
self.altitudeText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0),'ALT: %.1f m ' % self.relAlt,color='w',size=self.fontSize,ha='right')
self.climbRateText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97,'CR: %.1f m/s' % self.climbRate,color='w',size=self.fontSize,ha='right')
self.airspeedText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.altitudeText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.climbRateText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
def updateAARLocations(self):
'''Update the locations of airspeed, altitude and Climb rate.'''
# Locations
self.airspeedText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0)))
self.altitudeText.set_position((self.rightPos-(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0)))
self.climbRateText.set_position((self.rightPos-(self.vertSize/10.0),-0.97))
# Font Size
self.airspeedText.set_size(self.fontSize)
self.altitudeText.set_size(self.fontSize)
self.climbRateText.set_size(self.fontSize)
def updateAARText(self):
'Updates the displayed airspeed, altitude, climb rate Text'
self.airspeedText.set_text('AS: %.1f m/s' % self.airspeed)
self.altitudeText.set_text('ALT: %.1f m ' % self.relAlt)
self.climbRateText.set_text('CR: %.1f m/s' % self.climbRate)
def createBatteryBar(self):
'''Creates the bar to display current battery percentage.'''
self.batOutRec = patches.Rectangle((self.rightPos-(1.3+self.rOffset)*self.batWidth,1.0-(0.1+1.0+(2*0.075))*self.batHeight),self.batWidth*1.3,self.batHeight*1.15,facecolor='darkgrey',edgecolor='none')
self.batInRec = patches.Rectangle((self.rightPos-(self.rOffset+1+0.15)*self.batWidth,1.0-(0.1+1+0.075)*self.batHeight),self.batWidth,self.batHeight,facecolor='lawngreen',edgecolor='none')
self.batPerText = self.axes.text(self.rightPos - (self.rOffset+0.65)*self.batWidth,1-(0.1+1+(0.075+0.15))*self.batHeight,'%.f' % self.batRemain,color='w',size=self.fontSize,ha='center',va='top')
self.batPerText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.voltsText = self.axes.text(self.rightPos-(self.rOffset+1.3+0.2)*self.batWidth,1-(0.1+0.05+0.075)*self.batHeight,'%.1f V' % self.voltage,color='w',size=self.fontSize,ha='right',va='top')
self.ampsText = self.axes.text(self.rightPos-(self.rOffset+1.3+0.2)*self.batWidth,1-self.vertSize-(0.1+0.05+0.1+0.075)*self.batHeight,'%.1f A' % self.current,color='w',size=self.fontSize,ha='right',va='top')
self.voltsText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.ampsText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.axes.add_patch(self.batOutRec)
self.axes.add_patch(self.batInRec)
def updateBatteryBar(self):
'''Updates the position and values of the battery bar.'''
# Bar
self.batOutRec.set_xy((self.rightPos-(1.3+self.rOffset)*self.batWidth,1.0-(0.1+1.0+(2*0.075))*self.batHeight))
self.batInRec.set_xy((self.rightPos-(self.rOffset+1+0.15)*self.batWidth,1.0-(0.1+1+0.075)*self.batHeight))
self.batPerText.set_position((self.rightPos - (self.rOffset+0.65)*self.batWidth,1-(0.1+1+(0.075+0.15))*self.batHeight))
self.batPerText.set_fontsize(self.fontSize)
self.voltsText.set_text('%.1f V' % self.voltage)
self.ampsText.set_text('%.1f A' % self.current)
self.voltsText.set_position((self.rightPos-(self.rOffset+1.3+0.2)*self.batWidth,1-(0.1+0.05)*self.batHeight))
self.ampsText.set_position((self.rightPos-(self.rOffset+1.3+0.2)*self.batWidth,1-self.vertSize-(0.1+0.05+0.1)*self.batHeight))
self.voltsText.set_fontsize(self.fontSize)
self.ampsText.set_fontsize(self.fontSize)
if self.batRemain >= 0:
self.batPerText.set_text(int(self.batRemain))
self.batInRec.set_height(self.batRemain*self.batHeight/100.0)
if self.batRemain/100.0 > 0.5:
self.batInRec.set_facecolor('lawngreen')
elif self.batRemain/100.0 <= 0.5 and self.batRemain/100.0 > 0.2:
self.batInRec.set_facecolor('yellow')
elif self.batRemain/100.0 <= 0.2 and self.batRemain >= 0.0:
self.batInRec.set_facecolor('r')
elif self.batRemain == -1:
self.batInRec.set_height(self.batHeight)
self.batInRec.set_facecolor('k')
def createStateText(self):
'''Creates the mode and arm state text.'''
self.modeText = self.axes.text(self.leftPos+(self.vertSize/10.0),0.97,'UNKNOWN',color='grey',size=1.5*self.fontSize,ha='left',va='top')
self.modeText.set_path_effects([PathEffects.withStroke(linewidth=self.fontSize/10.0,foreground='black')])
def updateStateText(self):
'''Updates the mode and colours red or green depending on arm state.'''
self.modeText.set_position((self.leftPos+(self.vertSize/10.0),0.97))
self.modeText.set_text(self.mode)
self.modeText.set_size(1.5*self.fontSize)
if self.armed:
self.modeText.set_color('red')
self.modeText.set_path_effects([PathEffects.withStroke(linewidth=self.fontSize/10.0,foreground='yellow')])
elif (self.armed == False):
self.modeText.set_color('lightgreen')
self.modeText.set_bbox(None)
self.modeText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='black')])
else:
# Fall back if unknown
self.modeText.set_color('grey')
self.modeText.set_bbox(None)
self.modeText.set_path_effects([PathEffects.withStroke(linewidth=self.fontSize/10.0,foreground='black')])
def createWPText(self):
'''Creates the text for the current and final waypoint,
and the distance to the new waypoint.'''
self.wpText = self.axes.text(self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0),'0/0\n(0 m, 0 s)',color='w',size=self.fontSize,ha='left',va='top')
self.wpText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='black')])
def updateWPText(self):
'''Updates the current waypoint and distance to it.'''
self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0)))
self.wpText.set_size(self.fontSize)
if type(self.nextWPTime) is str:
self.wpText.set_text('%.f/%.f\n(%.f m, ~ s)' % (self.currentWP,self.finalWP,self.wpDist))
else:
self.wpText.set_text('%.f/%.f\n(%.f m, %.f s)' % (self.currentWP,self.finalWP,self.wpDist,self.nextWPTime))
def createWPPointer(self):
'''Creates the waypoint pointer relative to current heading.'''
self.headingWPTri = patches.RegularPolygon((0.0,0.55),3,0.05,facecolor='lime',zorder=4,ec='k')
self.axes.add_patch(self.headingWPTri)
self.headingWPText = self.axes.text(0.0,0.45,'1',color='lime',size=self.fontSize,horizontalalignment='center',verticalalignment='center',zorder=4)
self.headingWPText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
def adjustWPPointer(self):
'''Adjust the position and orientation of
the waypoint pointer.'''
self.headingWPText.set_size(self.fontSize)
headingRotate = mpl.transforms.Affine2D().rotate_deg_around(0.0,0.0,-self.wpBearing+self.heading)+self.axes.transData
self.headingWPText.set_transform(headingRotate)
angle = self.wpBearing - self.heading
if angle < 0:
angle += 360
if (angle > 90) and (angle < 270):
headRot = angle-180
else:
headRot = angle
self.headingWPText.set_rotation(-headRot)
self.headingWPTri.set_transform(headingRotate)
self.headingWPText.set_text('%.f' % (angle))
def createAltHistoryPlot(self):
'''Creates the altitude history plot.'''
self.altHistRect = patches.Rectangle((self.leftPos+(self.vertSize/10.0),-0.25),0.5,0.5,facecolor='grey',edgecolor='none',alpha=0.4,zorder=4)
self.axes.add_patch(self.altHistRect)
self.altPlot, = self.axes.plot([self.leftPos+(self.vertSize/10.0),self.leftPos+(self.vertSize/10.0)+0.5],[0.0,0.0],color='k',marker=None,zorder=4)
self.altMarker, = self.axes.plot(self.leftPos+(self.vertSize/10.0)+0.5,0.0,marker='o',color='k',zorder=4)
self.altText2 = self.axes.text(self.leftPos+(4*self.vertSize/10.0)+0.5,0.0,'%.f m' % self.relAlt,color='k',size=self.fontSize,ha='left',va='center',zorder=4)
def updateAltHistory(self):
'''Updates the altitude history plot.'''
self.altHist.append(self.relAlt)
self.timeHist.append(self.relAltTime)
# Delete entries older than histLim seconds
histLim = 10.0
currentTime = time.time()
point = 0
for i in range(0,len(self.timeHist)):
if (self.timeHist[i] > (currentTime - histLim)):
break
# Remove old entries
self.altHist = self.altHist[i:]
self.timeHist = self.timeHist[i:]
# Transform Data
x = []
y = []
tmin = min(self.timeHist)
tmax = max(self.timeHist)
x1 = self.leftPos+(self.vertSize/10.0)
y1 = -0.25
altMin = 0
altMax = max(self.altHist)
# Keep alt max for whole mission
if altMax > self.altMax:
self.altMax = altMax
else:
altMax = self.altMax
if tmax != tmin:
mx = 0.5/(tmax-tmin)
else:
mx = 0.0
if altMax != altMin:
my = 0.5/(altMax-altMin)
else:
my = 0.0
for t in self.timeHist:
x.append(mx*(t-tmin)+x1)
for alt in self.altHist:
val = my*(alt-altMin)+y1
# Crop extreme noise
if val < -0.25:
val = -0.25
elif val > 0.25:
val = 0.25
y.append(val)
# Display Plot
self.altHistRect.set_x(self.leftPos+(self.vertSize/10.0))
self.altPlot.set_data(x,y)
self.altMarker.set_data(self.leftPos+(self.vertSize/10.0)+0.5,val)
self.altText2.set_position((self.leftPos+(4*self.vertSize/10.0)+0.5,val))
self.altText2.set_size(self.fontSize)
self.altText2.set_text('%.f m' % self.relAlt)
# =============== Event Bindings =============== #
def on_idle(self, event):
'''To adjust text and positions on rescaling the window when resized.'''
# Check for resize
self.checkResize()
if self.resized:
# Fix Window Scales
self.rescaleX()
self.calcFontScaling()
# Recalculate Horizon Polygons
self.calcHorizonPoints()
# Update Roll, Pitch, Yaw Text Locations
self.updateRPYLocations()
# Update Airspeed, Altitude, Climb Rate Locations
self.updateAARLocations()
# Update Pitch Markers
self.adjustPitchmarkers()
# Update Heading and North Pointer
self.adjustHeadingPointer()
self.adjustNorthPointer()
# Update Battery Bar
self.updateBatteryBar()
# Update Mode and State
self.updateStateText()
# Update Waypoint Text
self.updateWPText()
# Adjust Waypoint Pointer
self.adjustWPPointer()
# Update History Plot
self.updateAltHistory()
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.resized = False
time.sleep(0.05)
def on_timer(self, event):
'''Main Loop.'''
state = self.state
self.loopStartTime = time.time()
if state.close_event.wait(0.001):
self.timer.Stop()
self.Destroy()
return
# Check for resizing
self.checkResize()
if self.resized:
self.on_idle(0)
# Get attitude information
while state.child_pipe_recv.poll():
objList = state.child_pipe_recv.recv()
for obj in objList:
self.calcFontScaling()
if isinstance(obj,Attitude):
self.oldRoll = self.roll
self.pitch = obj.pitch*180/math.pi
self.roll = obj.roll*180/math.pi
self.yaw = obj.yaw*180/math.pi
# Update Roll, Pitch, Yaw Text
self.updateRPYText()
# Recalculate Horizon Polygons
self.calcHorizonPoints()
# Update Pitch Markers
self.adjustPitchmarkers()
elif isinstance(obj,VFR_HUD):
self.heading = obj.heading
self.airspeed = obj.airspeed
self.climbRate = obj.climbRate
# Update Airspeed, Altitude, Climb Rate Text
self.updateAARText()
# Update Heading North Pointer
self.adjustHeadingPointer()
self.adjustNorthPointer()
elif isinstance(obj,Global_Position_INT):
self.relAlt = obj.relAlt
self.relAltTime = obj.curTime
# Update Airspeed, Altitude, Climb Rate Text
self.updateAARText()
# Update Altitude History
self.updateAltHistory()
elif isinstance(obj,BatteryInfo):
self.voltage = obj.voltage
self.current = obj.current
self.batRemain = obj.batRemain
# Update Battery Bar
self.updateBatteryBar()
elif isinstance(obj,FlightState):
self.mode = obj.mode
self.armed = obj.armState
# Update Mode and Arm State Text
self.updateStateText()
elif isinstance(obj,WaypointInfo):
self.currentWP = obj.current
self.finalWP = obj.final
self.wpDist = obj.currentDist
self.nextWPTime = obj.nextWPTime
if obj.wpBearing < 0.0:
self.wpBearing = obj.wpBearing + 360
else:
self.wpBearing = obj.wpBearing
# Update waypoint text
self.updateWPText()
# Adjust Waypoint Pointer
self.adjustWPPointer()
elif isinstance(obj, FPS):
# Update fps target
self.fps = obj.fps
# Redraw only when the next frame is due (throttle to the target FPS)
if (time.time() > self.nextTime):
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update()
# Calculate next frame time
if (self.fps > 0):
fpsTime = 1/self.fps
self.nextTime = fpsTime + self.loopStartTime
else:
self.nextTime = time.time()
def on_KeyPress(self,event):
'''To adjust the distance between pitch markers.'''
if event.GetKeyCode() == wx.WXK_UP:
self.dist10deg += 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
elif event.GetKeyCode() == wx.WXK_DOWN:
self.dist10deg -= 0.1
if self.dist10deg <= 0:
self.dist10deg = 0.1
print('Dist per 10 deg: %.1f' % self.dist10deg)
# Toggle Widgets
elif event.GetKeyCode() == 49: # 1
widgets = [self.modeText,self.wpText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 50: # 2
widgets = [self.batOutRec,self.batInRec,self.voltsText,self.ampsText,self.batPerText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 51: # 3
widgets = [self.rollText,self.pitchText,self.yawText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 52: # 4
widgets = [self.airspeedText,self.altitudeText,self.climbRateText]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 53: # 5
widgets = [self.altHistRect,self.altPlot,self.altMarker,self.altText2]
self.toggleWidgets(widgets)
elif event.GetKeyCode() == 54: # 6
widgets = [self.headingTri,self.headingText,self.headingNorthTri,self.headingNorthText,self.headingWPTri,self.headingWPText]
self.toggleWidgets(widgets)
# Update Matplotlib Plot
self.canvas.draw()
self.canvas.Refresh()
self.Refresh()
self.Update()
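# Rough usage sketch (illustrative; in practice this frame is created by the
# MAVProxy wxhorizon module, which supplies the shared `state` object carrying
# close_event and child_pipe_recv):
#   app = wx.App(False)
#   frame = HorizonFrame(state, title='Horizon Indicator')
#   app.MainLoop()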
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/MHDfluid.py | 1 | 14477 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
#@profile
def foo():
m = 8
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx+ 2
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(Velocity,u0, boundary)
bcb = DirichletBC(Magnetic,b0, boundary)
bcr = DirichletBC(Lagrange,r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, b, p, r) = TrialFunctions(W)
(v, c, q, s) = TestFunctions(W)
kappa = 1.0
Mu_m =10.
MU = 1.0/1
IterType = 'Full'
Split = "Yes"
Saddle = "No"
Stokes = "No"
SetupType = 'python-class'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n")
BCtime = time.time()
BC = MHDsetup.BoundaryIndices(mesh)
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
MO.StrTimePrint("BC index function, time: ", time.time()-BCtime)
Hiptmairtol = 1e-5
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
# ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, FSpaces,F_M,F_NS, 0,0,params,"CD","DG",Saddle,Stokes)
#
# Alin = MHDsetup.Assemble(FSpaces,ns,maxwell,CoupleTerm,Lns,Lmaxwell,0,bcs+BC, "Linear","CD")
# Fnlin,b = MHDsetup.Assemble(FSpaces,ns,maxwell,CoupleTerm,Lns,Lmaxwell,0,bcs+BC, "NonLinear","CD")
# A = Fnlin+Alin
# A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,"CD")
# u = b.duplicate()
#
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
#plot(p_k, interactive = True)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
#print assemble(inner(b,c)*dx).array().shape
#print mat
#ShiftedMass = assemble(inner(mat*b,c)*dx)
#as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])])
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
#plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, FSpaces,F_M,F_NS, u_k,b_k,params,IterType,"DG",Saddle,Stokes)
RHSform = forms.PicardRHS(mesh, FSpaces, u_k, p_k, b_k, r_k, params,"DG",Saddle,Stokes)
bcu = DirichletBC(Velocity,Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(Magnetic,Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(Lagrange,Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
parameters['linear_algebra_backend'] = 'uBLAS'
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
# FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
if IterType == "CD":
MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
u = b.duplicate()
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-5
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
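# Picard (fixed-point) iteration: reassemble the linearised MHD block system
# around the current iterate (u_k, p_k, b_k, r_k), solve it, and repeat until
# the update measure eps drops below tol or maxiter is reached.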
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
AssembleTime = time.time()
if IterType == "CD":
MO.StrTimePrint("MHD CD RHS assemble, time: ", time.time()-AssembleTime)
b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "CD",IterType)
else:
MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
if iter == 1:
Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
u = b.duplicate()
else:
Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
# AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
# A,b = CP.Assemble(AA,bb)
# if iter == 1:
MO.StrTimePrint("MHD total assemble, time: ", time.time()-AssembleTime)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm(PETSc.NormType.NORM_INFINITY)
u = b.duplicate()
#A,Q
if IterType == 'Full':
n = FacetNormal(mesh)
mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]])
F = CP.Scipy2PETSc(Fnlin[0])
a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1/2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1/2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(a)
bcu.apply(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
F = CP.Scipy2PETSc(Fnlin[0])
kspF = NSprecondSetup.LSCKSPnonlinear(F)
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Directclass',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time()- stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
NSits += nsits
SolutionTime += Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
#
# ExactSolution = [u0,p0,b0,r0]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "DG")
#
# if xx > 1:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
#
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
#
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
#
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
#
#
#
#
# import pandas as pd
#
#
#
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable
#
#
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable
#
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(u0,Velocity))
#
# plot(p_k)
#
# plot(interpolate(p0,Pressure))
#
# plot(b_k)
# plot(interpolate(b0,Magnetic))
#
# plot(r_k)
# plot(interpolate(r0,Lagrange))
#
# interactive()
interactive()
foo()
| mit |
gaoshuming/udacity | tutorials/weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
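# Example (illustrative; uses the TF 1.x random ops consistent with the rest of
# this helper):
#   hist_dist('Random Normal (stddev=0.1)', tf.random_normal([1000], stddev=0.1))
#   hist_dist('Random Uniform (-3, 3)', tf.random_uniform([1000], -3, 3))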
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
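# Rough usage sketch (illustrative; the MNIST import path and layer sizes are
# assumptions matching the notebook this helper accompanies, not definitions in
# this file):
#   from tensorflow.examples.tutorials.mnist import input_data
#   mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#   weights = [tf.Variable(tf.random_normal([784, 256])),
#              tf.Variable(tf.random_normal([256, 128])),
#              tf.Variable(tf.random_normal([128, 10]))]
#   compare_init_weights(mnist, 'Random Normal', [(weights, 'stddev=1.0')])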
| mit |
cjayb/mne-python | mne/viz/tests/test_topo.py | 2 | 11648 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Robert Luke <mail@robertluke.net>
#
# License: Simplified BSD
import os.path as op
from collections import namedtuple
import numpy as np
import pytest
import matplotlib
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, pick_channels_evoked, read_cov,
compute_proj_evoked)
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo_image_epochs, _get_presser,
mne_analyze_colormap, plot_evoked_topo)
from mne.viz.evoked import _line_plot_onselect
from mne.viz.utils import _fake_click
from mne.viz.topo import (_plot_update_evoked_topo_proj, iter_topography,
_imshow_tfr)
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return [0, 1, 2, 6, 7, 8, 306, 340, 341, 342] # take only a few channels
def _get_epochs():
"""Get epochs."""
raw = read_raw_fif(raw_fname)
raw.add_proj([], remove_existing=True)
events = _get_events()
picks = _get_picks(raw)
# bad proj warning
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = read_raw_fif(raw_fname)
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
with pytest.warns(RuntimeWarning, match='projection'):
epochs_delayed_ssp = Epochs(
raw, events[:10], event_id, tmin, tmax, picks=picks,
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_joint():
"""Test joint plot."""
evoked = _get_epochs().average()
evoked.plot_joint(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
def return_inds(d): # to test function kwarg to zorder arg of evoked.plot
return list(range(d.shape[0]))
evoked.plot_joint(title='test', topomap_args=dict(contours=0, res=8,
time_unit='ms'),
ts_args=dict(spatial_colors=True, zorder=return_inds,
time_unit='s'))
pytest.raises(ValueError, evoked.plot_joint, ts_args=dict(axes=True,
time_unit='s'))
axes = plt.subplots(nrows=3)[-1].flatten().tolist()
evoked.plot_joint(times=[0], picks=[6, 7, 8], ts_args=dict(axes=axes[0]),
topomap_args={"axes": axes[1:], "time_unit": "s"})
with pytest.raises(ValueError, match='array of length 6'):
evoked.plot_joint(picks=[6, 7, 8], ts_args=dict(axes=axes[0]),
topomap_args=dict(axes=axes[2:]))
plt.close('all')
# test proj options
assert len(evoked.info['projs']) == 0
evoked.pick_types(meg=True)
evoked.add_proj(compute_proj_evoked(
evoked, n_mag=1, n_grad=1, meg='combined'))
assert len(evoked.info['projs']) == 1
with pytest.raises(ValueError, match='must match ts_args'):
evoked.plot_joint(ts_args=dict(proj=True),
topomap_args=dict(proj=False))
evoked.plot_joint(ts_args=dict(proj='reconstruct'),
topomap_args=dict(proj='reconstruct'))
plt.close('all')
def test_plot_topo():
"""Test plotting of ERP topography."""
# Show topography
evoked = _get_epochs().average()
# should auto-find layout
plot_evoked_topo([evoked, evoked], merge_grads=True,
background_color='w')
picked_evoked = evoked.copy().pick_channels(evoked.ch_names[:3])
picked_evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
for ylim in [dict(mag=[-600, 600]), None]:
plot_evoked_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
pytest.raises(ValueError, plot_evoked_topo, evo, layout,
color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_evoked_topo(picked_evoked_delayed_ssp, layout,
proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', ['inaxes', 'xdata', 'ydata'])
func(event(inaxes=fig.axes[0], xdata=fig.axes[0]._mne_axs[0].pos[0],
ydata=fig.axes[0]._mne_axs[0].pos[1]))
func(event(inaxes=fig.axes[0], xdata=0, ydata=0))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
with pytest.warns(RuntimeWarning, match='projection'):
_plot_update_evoked_topo_proj(params, bools)
# should auto-generate layout
plot_evoked_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True,
background_color='k')
# Test RMS plot of grad pairs
picked_evoked.plot_topo(merge_grads=True, background_color='w')
plt.close('all')
for ax, idx in iter_topography(evoked.info, legend=True):
ax.plot(evoked.data[idx], color='red')
# test status bar message
if idx != -1:
assert (evoked.ch_names[idx] in ax.format_coord(.5, .5))
assert idx == -1
plt.close('all')
cov = read_cov(cov_fname)
cov['projs'] = []
evoked.pick_types(meg=True).plot_topo(noise_cov=cov)
plt.close('all')
# test plot_topo
evoked.plot_topo() # should auto-find layout
_line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data,
evoked.times)
plt.close('all')
for ax, idx in iter_topography(evoked.info): # brief test with false
ax.plot([0, 1, 2])
break
plt.close('all')
def test_plot_topo_nirs(fnirs_evoked):
"""Test plotting of ERP topography for nirs data."""
fnirs_evoked.pick(picks='hbo')
fig = plot_evoked_topo(fnirs_evoked)
assert len(fig.axes) == 1
plt.close('all')
def test_plot_topo_single_ch():
"""Test single channel topoplot with time cursor."""
evoked = _get_epochs().average()
evoked2 = evoked.copy()
# test plotting several evokeds on different time grids
evoked.crop(-.19, 0)
evoked2.crop(.05, .19)
fig = plot_evoked_topo([evoked, evoked2], background_color='w')
# test status bar message
ax = plt.gca()
assert ('MEG 0113' in ax.format_coord(.065, .63))
num_figures_before = len(plt.get_fignums())
_fake_click(fig, fig.axes[0], (0.08, 0.65))
assert num_figures_before + 1 == len(plt.get_fignums())
fig = plt.gcf()
ax = plt.gca()
_fake_click(fig, ax, (.5, .5), kind='motion') # cursor should appear
assert (isinstance(ax._cursorline, matplotlib.lines.Line2D))
_fake_click(fig, ax, (1.5, 1.5), kind='motion') # cursor should disappear
assert ax._cursorline is None
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography."""
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
epochs.load_data()
cmap = mne_analyze_colormap(format='matplotlib')
data_min = epochs._data.min()
plt.close('all')
fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
assert epochs._data.min() == data_min
num_figures_before = len(plt.get_fignums())
_fake_click(fig, fig.axes[0], (0.08, 0.64))
assert num_figures_before + 1 == len(plt.get_fignums())
# test for auto-showing a colorbar when only 1 sensor type
ep = epochs.copy().pick_types(meg=False, eeg=True)
fig = plot_topo_image_epochs(ep, vmin=None, vmax=None, colorbar=None,
cmap=cmap)
ax = [x for x in fig.get_children() if isinstance(x, matplotlib.axes.Axes)]
qm_cmap = [y.cmap for x in ax for y in x.get_children()
if isinstance(y, matplotlib.collections.QuadMesh)]
assert qm_cmap[0] is cmap
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data."""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
plt.close('all')
fig = tfr.plot_topo(baseline=(None, 0), mode='ratio',
title='Average power', vmin=0., vmax=14.)
# test opening tfr by clicking
num_figures_before = len(plt.get_fignums())
# could use np.reshape(fig.axes[-1].images[0].get_extent(), (2, 2)).mean(1)
with pytest.warns(None): # on old mpl (at least 2.0) there is a warning
_fake_click(fig, fig.axes[0], (0.08, 0.65))
assert num_figures_before + 1 == len(plt.get_fignums())
plt.close('all')
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
pytest.raises(ValueError, tfr.plot, [4], yscale='lin', show=False)
# nonuniform freqs
freqs = np.logspace(*np.log10([3, 10]), num=3)
tfr = AverageTFR(epochs.info, data, epochs.times, freqs, nave)
fig = tfr.plot([4], baseline=(None, 0), mode='mean', vmax=14., show=False)
assert fig.axes[0].get_yaxis().get_scale() == 'log'
# one timesample
tfr = AverageTFR(epochs.info, data[:, :, [0]], epochs.times[[1]],
freqs, nave)
with pytest.warns(None): # matplotlib equal left/right
tfr.plot([4], baseline=None, vmax=14., show=False, yscale='linear')
# one frequency bin with log scale requested: a log scale doesn't make sense
# for a single value, so check that yscale falls back to linear
vmin, vmax = 0., 2.
fig, ax = plt.subplots()
tmin, tmax = epochs.times[0], epochs.times[-1]
with pytest.warns(RuntimeWarning, match='not masking'):
_imshow_tfr(ax, 3, tmin, tmax, vmin, vmax, None, tfr=data[:, [0], :],
freq=freqs[[-1]], x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='log')
fig = plt.gcf()
assert fig.axes[0].get_yaxis().get_scale() == 'linear'
# ValueError when freq[0] == 0 and yscale == 'log'
these_freqs = freqs[:3].copy()
these_freqs[0] = 0
with pytest.warns(RuntimeWarning, match='not masking'):
pytest.raises(ValueError, _imshow_tfr, ax, 3, tmin, tmax, vmin, vmax,
None, tfr=data[:, :3, :], freq=these_freqs, x_label=None,
y_label=None, colorbar=False, cmap=('RdBu_r', True),
yscale='log')
run_tests_if_main()
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
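# Editor-added illustrative sketch (the helper below is not part of
# scikit-learn and is never called): zero or non-finite scale entries are
# reset to 1.0 so constant features do not produce NaNs when divided.
def _example_handle_zeros_in_scale():
    assert _handle_zeros_in_scale(0.) == 1.
    out = _handle_zeros_in_scale(np.array([2., 0., np.inf]))
    assert np.allclose(out, [2., 1., 1.])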
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
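# Editor-added illustrative sketch (not library code, never called): `scale`
# centers each column to zero mean and rescales it to unit population standard
# deviation; a constant column gets its zero std reset to 1 and simply becomes
# zero after centering.
def _example_scale_usage():
    X = np.array([[1., 7.], [3., 7.], [5., 7.]])
    Xs = scale(X)
    assert np.allclose(Xs.mean(axis=0), 0.)
    assert np.allclose(Xs[:, 0].std(), 1.)
    assert np.allclose(Xs[:, 1], 0.)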
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
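# Editor-added illustrative sketch (not library code, never called): the
# documented formula with the default feature_range=(0, 1), plus the round
# trip through inverse_transform.
def _example_minmax_scaler_usage():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    mm = MinMaxScaler().fit(X)
    assert np.allclose(mm.transform(X), [[0., 0.], [.5, .5], [1., 1.]])
    assert np.allclose(mm.inverse_transform(mm.transform(X)), X)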
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
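# Editor-added illustrative sketch (not library code, never called): fit
# stores mean_ and std_ from the training data and transform reuses those
# statistics on new samples.
def _example_standard_scaler_usage():
    X_train = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    ss = StandardScaler().fit(X_train)
    assert np.allclose(ss.mean_, [.5, .5])
    assert np.allclose(ss.std_, [.5, .5])
    assert np.allclose(ss.transform([[2., 2.]]), [[3., 3.]])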
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
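# Editor-added illustrative sketch (not library code, never called): each
# column is divided by its maximum absolute value, so zeros (and therefore
# sparsity) are preserved and the output lies in [-1, 1].
def _example_maxabs_scaler_usage():
    X = np.array([[1., -2.], [2., 0.], [4., 1.]])
    expected = [[.25, -1.], [.5, 0.], [1., .5]]
    assert np.allclose(MaxAbsScaler().fit_transform(X), expected)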
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
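# Editor-added illustrative sketch (not library code, never called): the
# median and interquartile range used by RobustScaler are barely affected by
# the single large outlier in the last row.
def _example_robust_scaler_usage():
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    rs = RobustScaler().fit(X)
    assert np.allclose(rs.center_, [3.])   # median
    assert np.allclose(rs.scale_, [2.])    # 75th - 25th percentile
    assert np.allclose(rs.transform([[5.]]), [[1.]])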
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
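# Editor-added illustrative sketch (not library code, never called): for two
# input features and degree=2 the six output columns match the class
# docstring, and powers_ records the exponent of each input per output column.
def _example_polynomial_features_usage():
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    assert poly.n_output_features_ == 6
    expected_powers = [[0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]]
    assert np.array_equal(poly.powers_, expected_powers)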
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
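# Editor-added illustrative sketch (not library code, never called): with the
# default norm='l2' each non-zero row is rescaled to unit Euclidean length,
# while an all-zero row is left untouched thanks to _handle_zeros_in_scale.
def _example_normalize_usage():
    X = np.array([[3., 4.], [0., 0.]])
    assert np.allclose(normalize(X), [[.6, .8], [0., 0.]])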
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
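# Editor-added illustrative sketch (not library code, never called): values
# strictly above the threshold map to 1; everything else, including the
# threshold itself, maps to 0.
def _example_binarize_usage():
    X = np.array([[-1., .5, 2.]])
    assert np.allclose(binarize(X, threshold=.5), [[0., 0., 1.]])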
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
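# Editor-added sanity-check sketch (not library code, never called): for a
# linear kernel K = X X^T, centering K with KernelCenterer is equivalent to
# computing the kernel on column-centered X, as the docstring states.
def _example_kernel_centerer_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    K = np.dot(X, X.T)
    Kc = KernelCenterer().fit(K).transform(K)
    Xc = X - X.mean(axis=0)
    assert np.allclose(Kc, np.dot(Xc, Xc.T))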
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known from fit,
# i.e. less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
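# Editor-added worked micro-example (a sketch, not one of the suite's tests):
# with scores [0.1, 0.4, 0.35, 0.8] for labels [0, 0, 1, 1], three of the four
# positive/negative pairs are ranked correctly, so the pairwise AUC is 0.75
# and agrees with roc_auc_score on the same inputs.
def _example_pairwise_auc():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)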
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# with only positive labels, all fpr values should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# with only negative labels, all tpr values should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test the Precision-Recall curve and the area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
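# Roughly: LRAP averages, over the relevant labels, the fraction of labels
# ranked at or above them that are themselves relevant. For instance, with
# y_true = [1, 0, 1] and y_score = [0.25, 0.5, 0.75], the top-scored relevant
# label contributes 1 / 1 and the bottom-scored one 2 / 3, hence the expected
# value (2 / 3 + 1 / 1) / 2 asserted below.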
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various settings:
# basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied at rank 1 both end up with rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
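# For example, with scores [0.5, 0.5, 0.25]: unique_rank = [0.25, 0.5],
# inv_rank = [1, 1, 0], so the uncorrected ranks are [1, 1, 2]; after the
# bincount/cumsum correction both tied labels get rank 2 and the remaining
# label gets rank 3.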
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
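# coverage_error is the average number of labels, counted from the top of the
# ranking, needed to cover all relevant labels; e.g. for y_true = [0, 1] with
# scores [0.75, 0.25] the relevant label is ranked second, so 2 labels are
# needed.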
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
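# label_ranking_loss is the fraction of (relevant, irrelevant) label pairs
# that are ordered incorrectly, averaged over samples; e.g. for
# y_true = [1, 0, 0] with scores [0.25, 0.5, 0.75] both irrelevant labels
# outrank the relevant one, giving 2 / 2 below.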
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
zackriegman/pydnn | docs/conf.py | 1 | 12621 | # -*- coding: utf-8 -*-
#
# pydnn documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 24 09:05:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# http://docs.readthedocs.org/en/latest/faq.html
# from mock import Mock as MagicMock
# class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
# MOCK_MODULES = ['numpy',
# 'scipy',
# 'theano',
# # 'yaml',
# 'pandas']
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/
import mock
for mod_name in ['numpy',
'distutils',
'scipy',
'scipy.misc',
'scipy.ndimage',
'theano',
'theano.tensor.signal',
'theano.tensor.nnet',
'theano.tensor',
'theano.ifelse',
'theano.printing',
'theano.gof',
'theano.gof.graph',
'gof.graph',
'boto.ec2',
'boto.ec2.blockdevicemapping',
# 'yaml',
'pandas']:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo'
# 'sphinx.ext.viewcode',
# 'sphinx.ext.pngmath'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pydnn'
copyright = u'2015, Isaac Kriegman'
author = u'Isaac Kriegman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydnndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pydnn.tex', u'pydnn Documentation',
u'Isaac Kriegman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pydnn', u'pydnn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pydnn', u'pydnn Documentation',
author, 'pydnn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# def skip(app, what, name, obj, skip, options):
# if name == "__init__":
# return False
# return skip
#
# def setup(app):
# app.connect("autodoc-skip-member", skip) | mit |
robbymeals/scikit-learn | sklearn/utils/tests/test_extmath.py | 130 | 16270 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with approximate effective rank `rank` and a
# significant noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without the 'low_rank' component: just regularly but
# slowly decreasing singular values, so the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
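# The naive formulation overflows for large negative x because exp(-x) blows
# up, so the extreme inputs below are compared against the analytically known
# limits rather than against the naive lambdas.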
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Mismatched dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
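# One algebraically equivalent way to combine the two blocks (a sketch; the
# helper may use a more numerically stable form):
# combined_mean = (n1 * mean1 + n2 * mean2) / (n1 + n2)
# combined_var = (n1 * (var1 + mean1 ** 2) + n2 * (var2 + mean2 ** 2))
#                / (n1 + n2) - combined_mean ** 2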
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
| bsd-3-clause |
jbedorf/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 24 | 5843 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
_MODULE_PATH = os.path.dirname(__file__)
_DEFAULT_DATA_FILE = os.path.join(_MODULE_PATH, "data/period_trend.csv")
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
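# (Here input_window_size=10 and output_window_size=6, matching the
# window_size=16 used by RandomWindowInputFn in train_and_predict below.)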
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
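# The bounds below are the forecast mean plus/minus one standard deviation
# (the square root of the predicted variance).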
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
input_filename = FLAGS.input_filename
if input_filename is None:
input_filename = _DEFAULT_DATA_FILE
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(input_filename))
make_plot("AR", *ar_train_and_predict(input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=False,
help="Input csv file (omit to use the data/period_trend.csv).")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tests/test_lines.py | 5 | 5451 | """
Tests specific to the lines module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
import matplotlib.lines as mlines
import nose
from nose.tools import assert_true, assert_raises
from timeit import repeat
import numpy as np
from cycler import cycler
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, image_comparison
@cleanup
def test_invisible_Line_rendering():
"""
GitHub issue #1256 identified a bug in the Line.draw method:
even with the visibility attribute set to False, the draw method was
not returning early enough, so some unnecessary pre-rendering code was
still executed. The consequence was an excessive draw time for
invisible Line instances holding a large number of points
(Npts > 10**6).
"""
# Creates big x and y data:
N = 10**7
x = np.linspace(0,1,N)
y = np.random.normal(size=N)
# Create a plot figure:
fig = plt.figure()
ax = plt.subplot(111)
# Create a "big" Line instance:
l = mlines.Line2D(x,y)
l.set_visible(False)
# but don't add it to the Axis instance `ax`
# [here Interactive panning and zooming is pretty responsive]
# Time the canvas drawing:
t_no_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# (gives about 25 ms)
# Add the big invisible Line:
ax.add_line(l)
# [Now interactive panning and zooming is very slow]
# Time the canvas drawing:
t_unvisible_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# gives about 290 ms for N = 10**7 pts
slowdown_factor = (t_unvisible_line/t_no_line)
slowdown_threshold = 2 # trying to avoid false positive failures
assert_true(slowdown_factor < slowdown_threshold)
@cleanup
def test_set_line_coll_dash():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
# Testing setting linestyles for line collections.
# This should not produce an error.
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
assert True
@image_comparison(baseline_images=['line_dashes'], remove_text=True)
def test_line_dashes():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), linestyle=(0, (3, 3)), lw=5)
@cleanup
def test_line_colors():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), color='none')
ax.plot(range(10), color='r')
ax.plot(range(10), color='.3')
ax.plot(range(10), color=(1, 0, 0, 1))
ax.plot(range(10), color=(1, 0, 0))
fig.canvas.draw()
assert True
@cleanup
def test_linestyle_variants():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for ls in ["-", "solid", "--", "dashed",
"-.", "dashdot", ":", "dotted"]:
ax.plot(range(10), linestyle=ls)
fig.canvas.draw()
assert True
@cleanup
def test_valid_linestyles():
line = mlines.Line2D([], [])
with assert_raises(ValueError):
line.set_linestyle('aardvark')
@cleanup
def test_drawstyle_variants():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for ds in ("default", "steps-mid", "steps-pre", "steps-post",
"steps", None):
ax.plot(range(10), drawstyle=ds)
fig.canvas.draw()
assert True
@cleanup
def test_valid_drawstyles():
line = mlines.Line2D([], [])
with assert_raises(ValueError):
line.set_drawstyle('foobar')
@image_comparison(baseline_images=['line_collection_dashes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
@image_comparison(baseline_images=['marker_fill_styles'], remove_text=True,
extensions=['png'])
def test_marker_fill_styles():
colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
altcolor = 'lightgreen'
y = np.array([1, 1])
x = np.array([0, 9])
fig, ax = plt.subplots()
for j, marker in enumerate(mlines.Line2D.filled_markers):
for i, fs in enumerate(mlines.Line2D.fillStyles):
color = next(colors)
ax.plot(j * 10 + x, y + i + .5 * (j % 2),
marker=marker,
markersize=20,
markerfacecoloralt=altcolor,
fillstyle=fs,
label=fs,
linewidth=5,
color=color,
markeredgecolor=color,
markeredgewidth=2)
ax.set_ylim([0, 7.5])
ax.set_xlim([-5, 155])
@image_comparison(baseline_images=['scaled_lines'], style='default')
def test_lw_scaling():
th = np.linspace(0, 32)
fig, ax = plt.subplots()
lins_styles = ['dashed', 'dotted', 'dashdot']
cy = cycler(matplotlib.rcParams['axes.prop_cycle'])
for j, (ls, sty) in enumerate(zip(lins_styles, cy)):
for lw in np.linspace(.5, 10, 10):
ax.plot(th, j*np.ones(50) + .1 * lw, linestyle=ls, lw=lw, **sty)
def test_nan_is_sorted():
line = mlines.Line2D([], [])
assert_true(line._is_sorted(np.array([1, 2, 3])))
assert_true(line._is_sorted(np.array([1, np.nan, 3])))
assert_true(not line._is_sorted([3, 5] + [np.nan] * 100 + [0, 2]))
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
ch3ll0v3k/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures illustrate how a point cloud can be very flat
in one direction, which is where PCA comes in: it chooses
a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
okadate/romspy | romspy/tplot/tplot_wind.py | 1 | 3991 | # coding: utf-8
# (c) 2015-11-28 Teruhisa Okada
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import netCDF4
import numpy as np
import pandas as pd
import romspy
def stick_plot_default(time, u, v, **kw):
from matplotlib.dates import date2num
#width = kw.pop('width', 0.002)
#headwidth = kw.pop('headwidth', 0)
#headlength = kw.pop('headlength', 0)
#headaxislength = kw.pop('headaxislength', 0)
angles = kw.pop('angles', 'uv')
ax = kw.pop('ax', None)
if angles != 'uv':
raise AssertionError("Stickplot angles must be 'uv' so that"
"if *U*==*V* the angle of the arrow on"
"the plot is 45 degrees CCW from the *x*-axis.")
uv = np.sqrt((u**2 + v**2))
time, u, v, uv = map(np.asanyarray, (time, u, v, uv))
if not ax:
fig, ax = plt.subplots()
q = ax.quiver(date2num(time), [[0]*len(time)], u, v, uv, angles='uv',
#width=width, headwidth=headwidth,
#headlength=headlength, headaxislength=headaxislength,
**kw)
ax.axes.get_yaxis().set_visible(False)
ax.xaxis_date()
return q
def stick_plot(time, u, v, **kw):
from matplotlib.dates import date2num
angles = kw.pop('angles', 'uv')
ax = kw.pop('ax', None)
if angles != 'uv':
raise AssertionError("Stickplot angles must be 'uv' so that"
"if *U*==*V* the angle of the arrow on"
"the plot is 45 degrees CCW from the *x*-axis.")
time, u, v = map(np.asanyarray, (time, u, v))
if not ax:
fig, ax = plt.subplots()
q = ax.quiver(date2num(time), [[0]*len(time)], u, v, angles='uv', **kw)
ax.axes.get_yaxis().set_visible(False)
ax.xaxis_date()
return q
def rolling_mean(date, u, v, hours):
df = pd.DataFrame({'u':u, 'v':v}, index=date)
df = pd.rolling_mean(df, window=hours, center=True)
return date, df.u.values, df.v.values
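# Note: pd.rolling_mean is the pandas < 0.18 API; on newer pandas the
# equivalent call would be df.rolling(window=hours, center=True).mean().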
def resample(date, u, v, **kw):
rule = kw.pop('resample', 'D')
if rule == 'H':
loffset = '-30min'
elif rule == '3H':
loffset = '-1.5H'
elif rule == '12H':
loffset = '-6H'
elif rule == 'D':
loffset = '-12H'
elif rule == 'M':
loffset = '-15D'
df = pd.DataFrame({'u':u, 'v':v}, index=date)
df = df.resample(rule, loffset=loffset)
print df
#time = [dt64.astype('M8[s]').astype('O') for dt64 in df.index.values]
return df.index.values.astype('M8[s]').astype('O'), df.u.values, df.v.values
def tplot_wind(ncfile, dates, x, y, **kw):
print ncfile, dates, x, y, kw.keys()
ax = kw.pop('ax', None)
date_format = kw.pop('date_format', '%Y-%m')
nc = netCDF4.Dataset(ncfile, 'r')
try:
time = nc.variables['time_wind'][:]
except:
time = nc.variables['wind_time'][:]
date = netCDF4.num2date(time, romspy.JST_days)
u = nc.variables['Uwind'][:,y,x]
v = nc.variables['Vwind'][:,y,x]
#date, u, v = rolling_mean(date, u, v, hours=hours)
date, u, v = resample(date, u, v, **kw)
q = stick_plot(date, u, v, ax=ax, width=0.002, scale=100, units='width')
ax.set_xlim(dates[0], dates[-1])
ax.xaxis.set_major_formatter(DateFormatter(date_format))
ref = 5
ref_str = "%s m s$^{-1}$" % ref
ax.quiverkey(q, 0.1, 0.75, ref, ref_str, labelpos='N', coordinates='axes')
if __name__ == '__main__':
from datetime import datetime
import seaborn as sns
ncfile = '/home/okada/Data/ob500_frc_wind_2012_rbf.nc'
dates = [datetime(2012,1,1,0), datetime(2013,1,1,0)]
dates = [datetime(2012,1,1,0), datetime(2012,1,7,0)]
fig, ax = plt.subplots(figsize=(11, 2))
#tplot_wind(ncfile, dates, 0, 0, hours=12, ax=ax)
tplot_wind(ncfile, dates, 0, 0, resample='H', ax=ax)
plt.show()
#plt.savefig('check_wind.png')
| mit |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/nolearn/decaf.py | 11 | 6033 | import os
import sys
from nolearn import cache
import numpy as np
from sklearn.base import BaseEstimator
def _transform_cache_key(self, X):
if len(X) == 1:
raise cache.DontCache
return ','.join([
str(X),
str(len(X)),
str(sorted(self.get_params().items())),
])
class ConvNetFeatures(BaseEstimator):
"""Extract features from images using a pretrained ConvNet.
Based on Yangqing Jia and Jeff Donahue's `DeCAF
<https://github.com/UCB-ICSI-Vision-Group/decaf-release/wiki>`_.
Please make sure you read and accept DeCAF's license before you
use this class.
If ``classify_direct=False``, this estimator expects its input X to be
a list of image filenames or arrays as produced by
`np.array(Image.open(filename))`.
"""
verbose = 0
def __init__(
self,
feature_layer='fc7_cudanet_out',
pretrained_params='imagenet.decafnet.epoch90',
pretrained_meta='imagenet.decafnet.meta',
center_only=True,
classify_direct=False,
verbose=0,
):
"""
:param feature_layer: The ConvNet layer that's used for
feature extraction. Defaults to
`fc7_cudanet_out`. A description of all
available layers for the
ImageNet-1k-pretrained ConvNet is found
in the DeCAF wiki. They are:
- `pool5_cudanet_out`
- `fc6_cudanet_out`
- `fc6_neuron_cudanet_out`
- `fc7_cudanet_out`
- `fc7_neuron_cudanet_out`
- `probs_cudanet_out`
:param pretrained_params: This must point to the file with the
pretrained parameters. Defaults to
`imagenet.decafnet.epoch90`. For
the ImageNet-1k-pretrained ConvNet
this file can be obtained from here:
http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/
:param pretrained_meta: Similar to `pretrained_params`, this
must file to the file with the
pretrained parameters' metadata.
Defaults to `imagenet.decafnet.meta`.
:param center_only: Use the center patch of the image only
when extracting features. If `False`, use
four corners, the image center and flipped
variants and average a total of 10 feature
vectors, which will usually yield better
results. Defaults to `True`.
:param classify_direct: When `True`, assume that input X is an
array of shape (num x 256 x 256 x 3)
as returned by `prepare_image`.
"""
self.feature_layer = feature_layer
self.pretrained_params = pretrained_params
self.pretrained_meta = pretrained_meta
self.center_only = center_only
self.classify_direct = classify_direct
self.net_ = None
if (not os.path.exists(pretrained_params) or
not os.path.exists(pretrained_meta)):
raise ValueError(
"Pre-trained ConvNet parameters not found. You may"
"need to download the files from "
"http://www.eecs.berkeley.edu/~jiayq/decaf_pretrained/ and "
"pass the path to the two files as `pretrained_params` and "
"`pretrained_meta` to the `{}` estimator.".format(
self.__class__.__name__))
def fit(self, X=None, y=None):
from decaf.scripts.imagenet import DecafNet # soft dep
if self.net_ is None:
self.net_ = DecafNet(
self.pretrained_params,
self.pretrained_meta,
)
return self
@cache.cached(_transform_cache_key)
def transform(self, X):
features = []
for img in X:
if self.classify_direct:
images = self.net_.oversample(
img, center_only=self.center_only)
self.net_.classify_direct(images)
else:
if isinstance(img, str):
import Image # soft dep
img = np.array(Image.open(img))
self.net_.classify(img, center_only=self.center_only)
feat = None
for layer in self.feature_layer.split(','):
val = self.net_.feature(layer)
if feat is None:
feat = val
else:
feat = np.hstack([feat, val])
if not self.center_only:
feat = feat.flatten()
features.append(feat)
if self.verbose:
sys.stdout.write(
"\r[ConvNet] %d%%" % (100. * len(features) / len(X)))
sys.stdout.flush()
if self.verbose:
sys.stdout.write('\n')
return np.vstack(features)
def prepare_image(self, image):
"""Returns image of shape `(256, 256, 3)`, as expected by
`transform` when `classify_direct = True`.
"""
from decaf.util import transform # soft dep
_JEFFNET_FLIP = True
# first, extract the 256x256 center.
image = transform.scale_and_extract(transform.as_rgb(image), 256)
# convert to [0,255] float32
image = image.astype(np.float32) * 255.
if _JEFFNET_FLIP:
# Flip the image if necessary, maintaining the c_contiguous order
image = image[::-1, :].copy()
# subtract the mean
image -= self.net_._data_mean
return image
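# A minimal usage sketch (illustrative only; the image filenames are
# hypothetical and the two pretrained files must be downloaded separately,
# as described in the docstrings above):
#
#   convnet = ConvNetFeatures(
#       pretrained_params='imagenet.decafnet.epoch90',
#       pretrained_meta='imagenet.decafnet.meta',
#   )
#   convnet.fit()
#   features = convnet.transform(['cat.jpg', 'dog.jpg'])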
| bsd-3-clause |
yw374cornell/e-mission-server | bin/regenerate_data.py | 2 | 2686 | # Regenerate score and footprint information for a set of users. In the
# production system, we only calculate the score for users who were part of the
# "game" group and the footprint for users who were part of the "data" group.
# When we want to analyse their engagement, though, we want to compare data
# across all groups. This fills in the gaps for the results that were not
# calculated initially.
# NOTE: The following values are hardcoded here. If this script is used on an
# ongoing basis, we should make them available as parameters somehow.
#
CFC_WEBAPP_PATH = "/Users/shankari/e-mission/e-mission-server/CFC_WebApp/"
START_DATE = "2014/12/1"
END_DATE = "2014/12/31"
UUID_GROUP_CSV = "/Users/shankari/e-mission/e-mission-data/client_stats/EMissions_Survey_i232_Fall_2014.csv_with_group"
import pandas as pd
from uuid import UUID
import sys
sys.path.append(CFC_WEBAPP_PATH)
from get_database import get_section_db, get_result_stats_db
from clients.data import data
from clients.gamified import gamified
from clients.choice import choice
from main import stats
from dao.user import User
import datetime as pydt
import time as pytime
def fixClientTimestamps(origDate):
"""
Note that all the stats will have the current timestamp in them. But
in order to run analyses, we need to fix them so that the timestamps
are from the correct date. This method fixes all timestamps from the past hour
(recently created and not yet fixed) to be from the origDate specified
"""
utctimestamp = lambda date: (date - pydt.datetime.utcfromtimestamp(0)).total_seconds()
origTs = utctimestamp(origDate)
now = pydt.datetime.utcnow()
hourago = now + pydt.timedelta(hours = -1)
get_result_stats_db().update({"ts": {"$gt": utctimestamp(hourago), "$lt": utctimestamp(now)}}, {"$set": {"ts": origTs}})
def regenerateData():
dec_days = pd.date_range(START_DATE, END_DATE)
uuidDF = pd.read_csv(UUID_GROUP_CSV)
for date in dec_days:
print "Generating scores for day %s" % date
for uuidStr in uuidDF.uuid:
uuid = UUID(uuidStr)
print "Generating scores for uuid %s " % uuid
choice.runBackgroundTasksForDay(uuid, date)
# Get the number of trips for a day
dateQuery = {"section_start_datetime": {"$gt": date, "$lt": date + pydt.timedelta(days=1)}}
tripsForDay = get_section_db().find({"$and": [{"user_id" : uuid}, dateQuery]}).count()
stats.storeServerEntry(uuid, stats.STAT_TRIP_MGR_TRIPS_FOR_DAY, pytime.time(), tripsForDay)
fixClientTimestamps(date)
if __name__ == '__main__':
import sys
regenerateData()
| bsd-3-clause |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W3.py | 1 | 1316 | # -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_033f.txt"]
for NazwaPliku in files:
print NazwaPliku
Plik=open(NazwaPliku)
#print DeltaT
Dane=Plik.readlines()#[4:]
DeltaT=float(Dane[2].split()[3].replace(",","."))
#M=len(Dane[4].split())/2
M=2
Dane=Dane[5:]
Plik.close()
print M
Ys=[np.zeros(len(Dane)) for i in range(M)]
for m in range(M):
for i in range(len(Dane)):
try:
Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
except:
print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
#print i, Y[i]
X=np.zeros_like(Ys[0])
for i in range(len(X)):
X[i]=i*DeltaT
for y in Ys:
print max(y)-min(y)
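# The Polish plot labels below translate roughly to: "Series circuit /
# one third of the resonance frequency" (Opis), "Voltage waveform" (title),
# "Time t [s]", "Voltage [V]", "Input" (Wejscie) and "Output" (Wyjscie).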
Opis=u"Układ szeregowy\nJedna trzecia częstotliwości rezonansowej"
Nazwa=u"Z5W3"
plt.title(u"Przebieg napięciowy\n"+Opis)
plt.xlabel(u"Czas t [s]")
plt.ylabel(u"Napięcie [V]")
plt.plot(X,Ys[0],label=u"Wejście")
plt.plot(X,Ys[1],label=u"Wyjście")
plt.grid()
plt.legend(loc="best")
plt.savefig(Nazwa + ".png", bbox_inches='tight')
plt.show()
| gpl-2.0 |
stylianos-kampakis/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is chosen too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
Second, we fix alpha and compare the performance of different feature
selection methods, using the area under the precision-recall curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
rhyswhitley/savanna_iav | src/figures/trends/rainfall_compare.py | 1 | 4100 | #!/usr/bin/env python2
import os
import numpy as np
import pandas as pd
import netCDF4 as nc
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from scipy import stats
def fit_trend(x, y):
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return {'model': p, 'slope': slope, 'int': intercept, \
'r2': r_value, 'se': std_err, 'p': p_value}
def add_trend(pObj, xseries, yseries, col, header):
# get linear trend line
xtrans = mdates.date2num(xseries.to_pydatetime())
trend = fit_trend(xtrans, yseries)
# make the significant slopes stand out more
if trend['p'] < 0.05:
sig_lw = 2.5
sig_alpha = 1
else:
sig_lw = 1.5
sig_alpha = 0.5
# create a label to shows statistics
trend_lab = '{0}: slope = {1:.2f}, p = {2:.3f}' \
.format(header, trend['slope'], trend['p'])
# plot trend line
pObj.plot(xseries, trend['model'](xtrans), '-', c=col, \
label=trend_lab, alpha=sig_alpha, lw=sig_lw)
def get_value(nc_obj, label):
return nc_obj.variables[label][:].flatten()
def get_dataframe(ncdf_con):
"""
A quick function to transform a netcdf file into a pandas dataframe that
can be used for analysis and plotting. Attributes are extracted using
built-in netCDF4 library functions. Time is arbitrary and needs to be
set by the user.
"""
# number of rows, equivalent to time-steps
time_len = len(ncdf_con.dimensions['time'])
# the header values for each measurements; excludes time and space components
data_values = ["Rainfall", "clim_Rainfall"]
# create a new dataframe from the netCDF file
nc_dataframe = pd.DataFrame({label: get_value(ncdf_con, label) \
for label in data_values}, \
index=pd.date_range("2001-01-01 00:30:00", \
periods=time_len, freq="30min"))
return nc_dataframe
def plot_rainfall():
# make a connection to the netCDF file
nc_conn = nc.Dataset(FILEPATH, 'r', format="NETCDF4")
# transform netCDF file into a useable dataframe
nc_data = get_dataframe(nc_conn)
annual_rain = nc_data.resample('AS', 'sum').ix[:-1, :]
fig = plt.figure(figsize=(7, 5))
grid = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(grid[0])
p1, = ax1.plot(annual_rain.index, annual_rain.Rainfall, 'o--', alpha=0.7, label='Tower Observation')
p2, = ax1.plot(annual_rain.index, annual_rain.clim_Rainfall, 'o-', label='Climatology')
add_trend(ax1, annual_rain.index, annual_rain.Rainfall, p1.get_color(), 'Tower Trend')
clim_mean = annual_rain.clim_Rainfall.mean()
vary_mean = annual_rain.Rainfall.mean()
# ax1.axhline(vary_mean, c='cyan')
# ax1.annotate('Mean PPT is {0:.0f} mm'.format(vary_mean), fontsize=10, \
# xy=("2003-01-01", vary_mean), xytext=("2003-01-01", 500), \
# arrowprops=dict(facecolor='cyan', edgecolor='none', shrink=0.05))
ax1.annotate('Mean PPT is {0:.0f} mm'.format(clim_mean), fontsize=10, \
xy=("2010-01-01", clim_mean), xytext=("2010-01-01", 500), \
arrowprops=dict(facecolor=p2.get_color(), edgecolor='none', shrink=0.05))
ax1.set_ylabel("Annual Rainfall (mm)", fontsize=12)
ax1.set_title("Howard Springs:: trends in rainfall", fontsize=13)
newax1 = pd.date_range("2001", periods=14, freq='AS')
ax1.xaxis.set_ticks(newax1)
ax1.xaxis.set_ticklabels(newax1, rotation=45, ha="right", fontsize=11)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.legend(loc='upper left', ncol=1)
plt.ylim([0, 3000])
plt.subplots_adjust(left=0.1, bottom=0.1, top=0.95, right=0.98)
plt.show()
return 1
if __name__ == "__main__":
FILEPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/ncdf/spa_hws_inputs.nc")
plot_rainfall()
| cc0-1.0 |
cbertinato/pandas | pandas/tests/frame/test_reshape.py | 1 | 39296 | from datetime import datetime
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame(columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two'],
['a', 'b']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=['first', 'second', 'third'])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, codes)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', np.nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [np.nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [np.nan] * 3 +
['z'] * 4 + [np.nan] * 3 + ['x'] * 3 + [np.nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([np.nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([np.nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2),
'C': np.arange(10)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', np.nan, np.nan, 680585148],
['U', 0.0, np.nan, 680585148],
['Pb', 7.07e-06, np.nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = np.nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = np.nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
codes=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('labels', [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(['a', 'a', 'b', 'c'])
df = pd.DataFrame({"A": cat, "B": cat})
result = df.stack()
index = pd.MultiIndex.from_product([[0, 1, 2, 3], ['A', 'B']])
expected = pd.Series(pd.Categorical(['a', 'a', 'a', 'a',
'b', 'b', 'c', 'c']),
index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('level', [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 1)],
names=['a', 'b'])
df = pd.DataFrame({"A": pd.core.arrays.integer_array([0, 1, None]),
"B": pd.Categorical(['a', 'a', 'b'])}, index=index)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = pd.Series([df.A.dtype] * 2 + [df.B.dtype] * 2,
index=result.columns)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, 'baz'])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = pd.MultiIndex.from_product([[0], ['d', 'c']],
names=['bar', 'baz'])
df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A'])
df.columns.name = 'foo'
expected = pd.DataFrame([
[3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([
('c', 'A'), ('c', 'B'), ('d', 'A'), ('d', 'B')], names=[
'baz', 'foo']))
expected.index.name = 'bar'
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = pd.DataFrame(
{'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
index=list('xyz')
)
assert_frame_equal(result, expected)
# Filling with any value replaces missing values as expected
result = data.unstack(fill_value='d')
expected = pd.DataFrame(
{'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
index=list('xyz')
)
assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = pd.DataFrame({
'timestamp': [
pd.Timestamp('2017-08-27 01:00:00.709949+0000', tz='UTC')],
'a': ['a'],
'b': ['b'],
'c': ['c'],
}, columns=['timestamp', 'a', 'b', 'c'])
result = df.set_index(['a', 'b']).unstack()
expected = pd.DataFrame([[pd.Timestamp('2017-08-27 01:00:00.709949+0000',
tz='UTC'),
'c']],
index=pd.Index(['a'], name='a'),
columns=pd.MultiIndex(
levels=[['timestamp', 'c'], ['b']],
codes=[[0, 1], [0, 0]],
names=[None, 'b']))
assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = pd.date_range(freq="D", start="20180101", end="20180103",
tz="America/New_York")
df = pd.DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = pd.Series(ts,
index=pd.MultiIndex(levels=[['a', 'b', 'c'], ['A']],
codes=[[0, 1, 2], [0, 0, 0]]))
assert_series_equal(result, expected)
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 34 | 9987 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
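# (added note) with the default arguments this returns a 100x100 scipy CSC matrix
# in which roughly half of the entries are zeroed out, plus a dense target vector
# of length 100 driven by only the first 10 (informative) features.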
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
kmike/scikit-learn | sklearn/feature_selection/rfe.py | 4 | 14193 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD Style.
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_arrays, safe_sqr, safe_mask
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
class RFE(BaseEstimator, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches.
Attributes
----------
`n_features_` : int
The number of selected features.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_arrays(X, y, sparse_format="csr")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(self.step * n_features)
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
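# (added note) the min(...) above caps how many features are removed this
# iteration, so the loop never drops below n_features_to_select.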
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(X[:, safe_mask(X, self.support_)])
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(X[:, safe_mask(X, self.support_)], y)
def transform(self, X):
"""Reduce X to the selected features during the elimination.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the features selected during the \
elimination.
"""
return X[:, safe_mask(X, self.support_)]
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
loss_func : function, optional (default=None)
The loss function to minimize by cross-validation. If None, then the
score function of the estimator is maximized.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
`n_features_` : int
The number of selected features with cross-validation.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
`cv_scores_` : array of shape [n_subsets_of_features]
The cross-validation scores such that
`cv_scores_[i]` corresponds to
the CV score of the i-th subset of features.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 informative features,
not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, loss_func=None,
estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.loss_func = loss_func
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_arrays(X, y, sparse_format="csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scores = np.zeros(X.shape[1])
# Cross-validation
n = 0
for train, test in cv:
# Compute a full ranking of the features
ranking_ = rfe.fit(X[train], y[train]).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X[train][:, mask], y[train])
if self.loss_func is None:
loss_k = 1.0 - estimator.score(X[test][:, mask], y[test])
else:
loss_k = self.loss_func(
y[test], estimator.predict(X[test][:, mask]))
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, loss=%f"
% (k, max(ranking_), loss_k))
scores[k] += loss_k
n += 1
# Pick the best number of features on average
best_score = np.inf
best_k = None
for k, score in enumerate(scores):
if score < best_score:
best_score = score
best_k = k + 1
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=best_k,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, safe_mask(X, rfe.support_)], y)
self.n_features_ = rfe.n_features_
self.support_ = rfe.support_
self.ranking_ = rfe.ranking_
self.cv_scores_ = scores / n
return self
| bsd-3-clause |
fegonda/icon_demo | code/model/unet/old/unet_multiChannel.py | 1 | 15594 | # yet another version of the IDSIA network
# based on code from keras tutorial
# http://keras.io/getting-started/sequential-model-guide/
from keras.models import Model, Sequential, model_from_json
from keras.layers import Dense, Activation, Flatten, Input
from keras.layers import Convolution2D, MaxPooling2D, UpSampling2D, merge, ZeroPadding2D, Dropout, Lambda
from keras.callbacks import EarlyStopping
from keras import backend as K
from keras.optimizers import SGD
from keras.regularizers import l2
from generate_data import *
import multiprocessing
import sys
import os
import time
import glob
import numpy as np
import mahotas
import matplotlib
import matplotlib.pyplot as plt
# losing independence of backend for
# custom loss function
import theano
import theano.tensor as T
from evaluation import Rand_membrane_prob
from theano.tensor.shared_randomstreams import RandomStreams
rng = np.random.RandomState(7)
train_samples = 20
val_samples = 10
learning_rate = 0.01
momentum = 0.95
doTrain = int(sys.argv[1])
patchSize = 572 #140
patchSize_out = 388 #132
weight_decay = 0.0
weight_class_1 = 1.
patience = 10
purpose = 'train'
nr_layers = 3
initialization = 'glorot_uniform'
filename = 'unet_3d'
print "filename: ", filename
srng = RandomStreams(1234)
# need to define a custom loss, because all pre-implementations
# seem to assume that scores over a patch add up to one, which
# they clearly don't and shouldn't
def unet_crossentropy_loss(y_true, y_pred):
epsilon = 1.0e-4
y_pred_clipped = T.clip(y_pred, epsilon, 1.0-epsilon)
loss_vector = -T.mean(weight_class_1*y_true * T.log(y_pred_clipped) + (1-y_true) * T.log(1-y_pred_clipped), axis=1)
average_loss = T.mean(loss_vector)
return average_loss
def unet_crossentropy_loss_sampled(y_true, y_pred):
epsilon = 1.0e-4
y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
y_true = T.flatten(y_true)
# this seems to work
# it is super ugly though and I am sure there is a better way to do it
# but I am struggling to get theano to cooperate
# filter the right indices
indPos = T.nonzero(y_true)[0] # T.nonzero returns a tuple of index arrays (one per dimension)
indNeg = T.nonzero(1-y_true)[0]
# shuffle
n = indPos.shape[0]
indPos = indPos[srng.permutation(n=n)]
n = indNeg.shape[0]
indNeg = indNeg[srng.permutation(n=n)]
# subset assuming each class has at least 200 samples present
indPos = indPos[:200]
indNeg = indNeg[:200]
loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
average_loss = T.mean(loss_vector)
return average_loss
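# Illustration (added; not part of the original file): a minimal NumPy sketch of
# the class-balanced sampling idea implemented symbolically above. It assumes
# y_true and y_pred are flat float arrays with at least one pixel per class;
# the per-class cap of 200 mirrors the theano version.
def _sampled_crossentropy_numpy_sketch(y_true, y_pred, cap=200, epsilon=1.0e-4):
    # clip predictions away from 0/1 to keep the logs finite
    y_pred = np.clip(np.ravel(y_pred), epsilon, 1.0 - epsilon)
    y_true = np.ravel(y_true)
    # shuffle positive and negative pixel indices and keep at most `cap` of each
    ind_pos = np.random.permutation(np.nonzero(y_true)[0])[:cap]
    ind_neg = np.random.permutation(np.nonzero(1 - y_true)[0])[:cap]
    return -np.mean(np.log(y_pred[ind_pos])) - np.mean(np.log(1 - y_pred[ind_neg]))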
def unet_block_down(input, nb_filter, doPooling=True, doDropout=False):
# first convolutional block consisting of 2 conv layers plus activation, then maxpool.
# All convolutions use 'valid' border mode, not 'same'
act1 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(input)
act2 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(act1)
if doDropout:
act2 = Dropout(0.5)(act2)
if doPooling:
# now downsampling with maxpool
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="valid")(act2)
else:
pool1 = act2
return (act2, pool1)
# need to define lambda layer to implement cropping
# input is a tensor of size (batchsize, channels, width, height)
def crop_layer(x, cs):
cropSize = cs
return x[:,:,cropSize:-cropSize, cropSize:-cropSize]
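# Sanity sketch (added; not in the original file): crop_layer keeps the central
# spatial window, e.g. an 8x8 feature map cropped with cs=2 becomes 4x4; plain
# NumPy slicing behaves the same way as the symbolic slicing above.
_crop_demo = np.zeros((1, 1, 8, 8))
assert crop_layer(_crop_demo, 2).shape == (1, 1, 4, 4)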
def unet_block_up(input, nb_filter, down_block_out):
print "This is unet_block_up"
print "input ", input._keras_shape
# upsampling
up_sampled = UpSampling2D(size=(2,2))(input)
print "upsampled ", up_sampled._keras_shape
# up-convolution
conv_up = Convolution2D(nb_filter=nb_filter, nb_row=2, nb_col=2, subsample=(1,1),
init=initialization, activation='relu', border_mode="same", W_regularizer=l2(weight_decay))(up_sampled)
print "up-convolution ", conv_up._keras_shape
# concatenation with cropped high res output
# this is too large and needs to be cropped
print "to be merged with ", down_block_out._keras_shape
#padding_1 = int((down_block_out._keras_shape[2] - conv_up._keras_shape[2])/2)
#padding_2 = int((down_block_out._keras_shape[3] - conv_up._keras_shape[3])/2)
#print "padding: ", (padding_1, padding_2)
#conv_up_padded = ZeroPadding2D(padding=(padding_1, padding_2))(conv_up)
#merged = merge([conv_up_padded, down_block_out], mode='concat', concat_axis=1)
cropSize = int((down_block_out._keras_shape[2] - conv_up._keras_shape[2])/2)
down_block_out_cropped = Lambda(crop_layer, output_shape=conv_up._keras_shape[1:], arguments={"cs":cropSize})(down_block_out)
print "cropped layer size: ", down_block_out_cropped._keras_shape
merged = merge([conv_up, down_block_out_cropped], mode='concat', concat_axis=1)
print "merged ", merged._keras_shape
# two 3x3 convolutions with ReLU
# first one halves the feature channels
act1 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(merged)
print "conv1 ", act1._keras_shape
act2 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(act1)
print "conv2 ", act2._keras_shape
return act2
if doTrain:
# input data should be large patches as prediction is also over large patches
print
print "=== building network ==="
print "== BLOCK 1 =="
input = Input(shape=(nr_layers, patchSize, patchSize))
print "input ", input._keras_shape
block1_act, block1_pool = unet_block_down(input=input, nb_filter=64)
print "block1 act ", block1_act._keras_shape
print "block1 ", block1_pool._keras_shape
print "== BLOCK 2 =="
block2_act, block2_pool = unet_block_down(input=block1_pool, nb_filter=128)
print "block2 ", block2_pool._keras_shape
print "== BLOCK 3 =="
block3_act, block3_pool = unet_block_down(input=block2_pool, nb_filter=256)
print "block3 ", block3_pool._keras_shape
print "== BLOCK 4 =="
block4_act, block4_pool = unet_block_down(input=block3_pool, nb_filter=512, doDropout=True)
print "block4 ", block4_pool._keras_shape
print "== BLOCK 5 =="
print "no pooling"
block5_act, block5_pool = unet_block_down(input=block4_pool, nb_filter=1024, doDropout=True, doPooling=False)
print "block5 ", block5_pool._keras_shape
print "=============="
print
print "== BLOCK 4 UP =="
block4_up = unet_block_up(input=block5_act, nb_filter=512, down_block_out=block4_act)
print "block4 up", block4_up._keras_shape
print
print "== BLOCK 3 UP =="
block3_up = unet_block_up(input=block4_up, nb_filter=256, down_block_out=block3_act)
print "block3 up", block3_up._keras_shape
print
print "== BLOCK 2 UP =="
block2_up = unet_block_up(input=block3_up, nb_filter=128, down_block_out=block2_act)
print "block2 up", block2_up._keras_shape
print
print "== BLOCK 1 UP =="
block1_up = unet_block_up(input=block2_up, nb_filter=64, down_block_out=block1_act)
print "block1 up", block1_up._keras_shape
print "== 1x1 convolution =="
output = Convolution2D(nb_filter=1, nb_row=1, nb_col=1, subsample=(1,1),
init=initialization, activation='sigmoid', border_mode="valid")(block1_up)
print "output ", output._keras_shape
output_flat = Flatten()(output)
print "output flat ", output_flat._keras_shape
model = Model(input=input, output=output_flat)
#model = Model(input=input, output=block1_act)
sgd = SGD(lr=learning_rate, decay=0, momentum=momentum, nesterov=False)
#model.compile(loss='mse', optimizer=sgd)
model.compile(loss=unet_crossentropy_loss_sampled, optimizer=sgd)
data_val = generate_experiment_data_patch_prediction_layers(purpose='validate', nsamples=val_samples, patchSize=patchSize, outPatchSize=patchSize_out, nr_layers=nr_layers)
data_x_val = data_val[0].astype(np.float32)
data_x_val = np.reshape(data_x_val, [-1, nr_layers, patchSize, patchSize])
data_y_val = data_val[1].astype(np.float32)
data_label_val = data_val[2]
# start pool for data
print "Starting worker."
pool = multiprocessing.Pool(processes=1)
futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_patch_prediction_layers, purpose, train_samples, patchSize, patchSize_out, nr_layers]])
best_val_loss_so_far = 0
patience_counter = 0
for epoch in xrange(10000000):
print "Waiting for data."
data = futureData.get()
data_x = data[0].astype(np.float32)
data_x = np.reshape(data_x, [-1, nr_layers, patchSize, patchSize])
data_y = data[1].astype(np.float32)
print "got new data"
futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_patch_prediction_layers, purpose, train_samples, patchSize, patchSize_out, nr_layers]])
print "current learning rate: ", model.optimizer.lr.get_value()
model.fit(data_x, data_y, batch_size=1, nb_epoch=1)
im_pred = 1-model.predict(x=data_x_val, batch_size = 1)
mean_val_rand = 0
for val_ind in xrange(val_samples):
im_pred_single = np.reshape(im_pred[val_ind,:], (patchSize_out,patchSize_out))
im_gt = np.reshape(data_label_val[val_ind], (patchSize_out,patchSize_out))
validation_rand = Rand_membrane_prob(im_pred_single, im_gt)
mean_val_rand += validation_rand
mean_val_rand /= np.double(val_samples)
print "validation RAND ", mean_val_rand
json_string = model.to_json()
open(filename+'.json', 'w').write(json_string)
model.save_weights(filename+'_weights.h5', overwrite=True)
print mean_val_rand, " > ", best_val_loss_so_far
print mean_val_rand - best_val_loss_so_far
if mean_val_rand > best_val_loss_so_far:
best_val_loss_so_far = mean_val_rand
print "NEW BEST MODEL"
json_string = model.to_json()
open(filename+'_best.json', 'w').write(json_string)
model.save_weights(filename+'_best_weights.h5', overwrite=True)
patience_counter=0
else:
patience_counter +=1
# no progress anymore, need to decrease learning rate
if patience_counter == patience:
print "DECREASING LEARNING RATE"
print "before: ", learning_rate
learning_rate *= 0.1
print "now: ", learning_rate
model.optimizer.lr.set_value(learning_rate)
patience = 10
patience_counter = 0
# stop if not learning anymore
if learning_rate < 1e-7:
break
else:
start_time = time.clock()
network_file_path = 'to_evaluate/'
file_search_string = network_file_path + '*.json'
files = sorted( glob.glob( file_search_string ) )
pathPrefix = '/media/vkaynig/Data1/all_data/testing/AC4_small/'
#pathPrefix = '/media/vkaynig/Data1/all_data/testing/AC4/'
for file_index in xrange(np.shape(files)[0]):
print files[file_index]
model = model_from_json(open(files[file_index]).read())
weight_file = ('.').join(files[file_index].split('.')[:-1])
model.load_weights(weight_file+'_weights.h5')
model_name = os.path.splitext(os.path.basename(files[file_index]))[0]
# create directory
if not os.path.exists(pathPrefix+'boundaryProbabilities/'+model_name):
os.makedirs(pathPrefix+'boundaryProbabilities/'+model_name)
sgd = SGD(lr=0.01, decay=0, momentum=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
img_search_string = pathPrefix + 'gray_images/*.tif'
img_files = sorted( glob.glob( img_search_string ) )
for img_index in xrange(np.shape(img_files)[0]):
print img_files[img_index]
img_cs = int(np.floor(nr_layers/2))
img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files)[0]-1)
padding_ul = int(np.ceil((patchSize - patchSize_out)/2.0))
needed_ul_padding = patchSize - padding_ul
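# (added note) with the defaults patchSize=572 and patchSize_out=388 used above,
# padding_ul = ceil(184 / 2) = 92 and needed_ul_padding = 572 - 92 = 480.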
image = mahotas.imread(img_files[0])
# need large padding for lower right corner
paddedImage = np.pad(image, patchSize, mode='reflect')
paddedImage = paddedImage[needed_ul_padding:, needed_ul_padding:]
paddedSize = paddedImage.shape
layer_image = np.zeros((nr_layers,paddedSize[0],paddedSize[1]))
for ind, read_index in enumerate(img_valid_range_indices):
image = mahotas.imread(img_files[read_index])
image = normalizeImage(image)
image = image - 0.5
paddedImage = np.pad(image, patchSize, mode='reflect')
paddedImage = paddedImage[needed_ul_padding:, needed_ul_padding:]
layer_image[ind] = paddedImage
probImage = np.zeros(image.shape)
# run one prediction first so theano compilation time counts toward initialization
row = 0
col = 0
patch = layer_image[:,row:row+patchSize,col:col+patchSize]
data = np.reshape(patch, (1,nr_layers,patchSize,patchSize))
probs = model.predict(x=data, batch_size=1)
init_time = time.clock()
#print "Initialization took: ", init_time - start_time
probImage_tmp = np.zeros(image.shape)
for row in xrange(0,image.shape[0],patchSize_out):
for col in xrange(0,image.shape[1],patchSize_out):
patch = layer_image[:,row:row+patchSize,col:col+patchSize]
data = np.reshape(patch, (1,nr_layers,patchSize,patchSize))
probs = 1-model.predict(x=data, batch_size = 1)
probs = np.reshape(probs, (patchSize_out,patchSize_out))
row_end = patchSize_out
if row+patchSize_out > probImage.shape[0]:
row_end = probImage.shape[0]-row
col_end = patchSize_out
if col+patchSize_out > probImage.shape[1]:
col_end = probImage.shape[1]-col
probImage_tmp[row:row+row_end,col:col+col_end] = probs[:row_end,:col_end]
probImage = probImage_tmp
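# (added note) at this point the valid 388x388 output tiles have been stitched
# edge-to-edge, with tiles at the right and bottom borders truncated to fit the image.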
print pathPrefix+'boundaryProbabilities/'+model_name+'/'+str(img_index).zfill(4)+'.tif'
mahotas.imsave(pathPrefix+'boundaryProbabilities/'+model_name+'/'+str(img_index).zfill(4)+'.tif', np.uint8(probImage*255))
end_time = time.clock()
print "Prediction took: ", end_time - init_time
print "Speed: ", 1./(end_time - init_time)
print "Time total: ", end_time-start_time
print "min max output ", np.min(probImage), np.max(probImage)
| mit |
krzjoa/sciquence | sciquence/shapelets/shapelet_classifier.py | 1 | 1335 | # -*- coding: utf-8 -*-
# Krzysztof Joachimiak 2017
# sciquence: Time series & sequences in Python
#
# Shapelet processor
# Author: Krzysztof Joachimiak
#
# License: MIT
from shapelet_utils import all_candidates
from sklearn.preprocessing import scale
from sklearn.base import BaseEstimator
class ShapeletClassifier(BaseEstimator):
'''
Shapelet tree-based classifier.
Parameters
----------
min: int
Minimal length of window
max: int
Maximal length of window
step: int
Window step in process of looking for potential shapelets
Examples
--------
>>> from sciquence.shapelets import ShapeletClassifier
>>> from sklearn.model_selection import train_test_split
>>> sc = ShapeletClassifier(min=5, max=10)
>>> X, y = load_dataset()
>>> X_train, X_test, y_train, y_test = train_test_split(X, y)
>>> prediction = sc.fit(X_train, y_train).predict(X_test, y_test)
References
----------
Ye L., Keogh E. (2009).
Time Series Shapelets: A New Primitive for Data Mining
http://alumni.cs.ucr.edu/~lexiangy/Shapelet/kdd2009shapelet.pdf
'''
def __init__(self, min, max):
raise NotImplementedError
self.min = min
self.max = max
def fit(self, X, y):
pass
def predict(self, X):
pass | mit |
ryi06/inferelator_ng | inferelator_ng/tests/test_results_processor.py | 3 | 11794 | import unittest
from .. import results_processor
import pandas as pd
import numpy as np
class TestResultsProcessor(unittest.TestCase):
def test_combining_confidences_one_beta(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[0.5, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta], [beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.5, 0.0], [0.5, 1.0]]))
def test_combining_confidences_one_beta_invariant_to_rescale_division(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[1, 0], [1, 2]]), ['gene1', 'gene2'], ['tf1','tf2'])
rescaled_beta = pd.DataFrame((beta / 3.0), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.5, 0.0], [0.5, 1.0]]))
def test_combining_confidences_one_beta_all_negative_values(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[-1, -.5, -3], [-1, -2, 0]]), ['gene1', 'gene2'], ['tf1','tf2', 'tf3'])
rescaled_beta = pd.DataFrame([[0.2, 0.1, 0.4], [0.3, 0.5, 0]], ['gene1', 'gene2'], ['tf1','tf2', 'tf3'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.4, 0.2, 0.8], [0.6, 1.0, 0]]))
def test_combining_confidences_one_beta_with_negative_values(self):
# data was taken from a subset of row 42 of b subtilis run
beta = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[ 0.75, 0, 0.25, 1, 0.5 ]]))
def test_combining_confidences_two_betas_negative_values(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[ 0.1, 0. , 0. , 0.3, 0.6]]))
def test_combining_confidences_two_betas_negative_values_assert_nonzero_betas(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(rp.betas_non_zero, np.array([[1 ,1, 2, 1, 2]]))
def test_combining_confidences_two_betas_negative_values_assert_sign_betas(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(rp.betas_sign, np.array([[-1 ,1, 2, -1, 2]]))
def test_threshold_and_summarize_one_beta(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1], [beta1])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,0]]))
def test_threshold_and_summarize_two_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2], [beta1, beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,1]]))
def test_threshold_and_summarize_three_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta3 = pd.DataFrame(np.array([[0.5, 0.2], [0.5, 0.1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2, beta3], [beta1, beta2, beta3])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,0]]))
def test_threshold_and_summarize_three_betas_negative_values(self):
beta1 = pd.DataFrame(np.array([[1, 0], [-0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [-0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta3 = pd.DataFrame(np.array([[-0.5, 0.2], [-0.5, 0.1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2, beta3], [beta1, beta2, beta3])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,1]]))
####################
# TODO: Fix the following three tests so that they have unique and correct precision recall values
####################
def test_precision_recall_perfect_prediction(self):
gs = pd.DataFrame(np.array([[1, 0], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 1., 1., 1.])
def test_precision_recall_prediction_off(self):
gs = pd.DataFrame(np.array([[1, 0], [0, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0.5, 0.5, 1., 1. ])
np.testing.assert_equal(precision, [ 1., 1., 0.5, 2./3, 0.5])
def test_precision_recall_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, 1], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0., 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 0., 0., 0., 1./3, 0.5,])
def test_aupr_perfect_prediction(self):
gs = pd.DataFrame(np.array([[1, 0], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 1.0)
def test_negative_gs_aupr_perfect_prediction(self):
gs = pd.DataFrame(np.array([[-1, 0], [-1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 1.0)
def test_negative_gs_precision_recall_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, -1], [-1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0., 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 0., 0., 0., 1./3, 0.5,])
def test_aupr_prediction_off(self):
gs = pd.DataFrame(np.array([[1, 0], [0, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 19./24)
def test_aupr_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, 1], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_approx_equal(aupr, 7./24)
def test_mean_and_median(self):
beta1 = pd.DataFrame(np.array([[1, 1], [1, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[2, 2], [2, 2]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2], [beta1, beta2])
mean, median = rp.mean_and_median(rp.betas)
np.testing.assert_equal(mean, np.array([[ 1.5, 1.5],[ 1.5, 1.5]]))
np.testing.assert_equal(median, np.array([[ 1.5, 1.5],[ 1.5, 1.5]])) | bsd-2-clause |