repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
jswanljung/iris | docs/iris/example_code/Meteorology/hovmoller.py | 6 | 1622 | """
Hovmoller diagram of monthly surface temperature
================================================
This example demonstrates the creation of a Hovmoller diagram with fine control
over plot ticks and labels. The data comes from the Met Office OSTIA project
and has been pre-processed to calculate the monthly mean sea surface
temperature.
"""
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
    """Plot a Hovmoller (time vs. longitude) diagram of monthly-mean SST."""
    # Opt in to the future netCDF loading behaviour so the load is
    # reproducible across Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load a single surface_temperature cube restricted to +/- 5 deg latitude.
    fname = iris.sample_data_path('ostia_monthly.nc')
    constraint = iris.Constraint('surface_temperature',
                                 latitude=lambda v: -5 < v < 5)
    cube = iris.load_cube(fname, constraint)

    # Collapse latitude so the remaining dimensions are time and longitude.
    cube = cube.collapsed('latitude', iris.analysis.MEAN)

    # Filled contours with 20 levels.
    qplt.contourf(cube, 20)

    # Custom y-axis label, and no clever range padding from matplotlib.
    plt.ylabel('Time / years')
    plt.axis('tight')

    # Annual variability: one tick per year, labelled with the year only.
    ax = plt.gca()
    ax.yaxis.set_major_locator(mdates.YearLocator())
    ax.yaxis.set_major_formatter(mdates.DateFormatter('%Y'))

    iplt.show()


if __name__ == '__main__':
    main()
| lgpl-3.0 |
bderembl/mitgcm_configs | eddy_iwave/analysis/azimuthal_average.py | 1 | 9428 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.interpolate as spint
import scipy.spatial.qhull as qhull
import itertools
import MITgcmutils as mit
import f90nml
plt.ion()
def interp_weights(xyz, uvw):
    """Precompute linear (barycentric) interpolation weights.

    Triangulates the scattered source points ``xyz`` (n, d) and, for each
    target point in ``uvw`` (m, d), returns the indices of the enclosing
    simplex vertices together with their barycentric weights, so that
    repeated interpolations of different fields on the same grids only
    need a cheap weighted sum (see ``interpolate``).

    Returns
    -------
    vertices : (m, d+1) int array of indices into ``xyz``
    weights  : (m, d+1) float array; each row sums to 1.  Target points
               outside the convex hull get negative weights, which
               ``interpolate`` uses to apply its fill value.
    """
    # scipy.spatial.qhull is a private module (removed in recent SciPy
    # releases); the public triangulation class is scipy.spatial.Delaunay.
    from scipy.spatial import Delaunay
    naux, d = xyz.shape
    tri = Delaunay(xyz)
    simplex = tri.find_simplex(uvw)
    vertices = np.take(tri.simplices, simplex, axis=0)
    # tri.transform holds, per simplex, the affine map to barycentric
    # coordinates: rows [:d] are the matrix, row [d] is the offset.
    temp = np.take(tri.transform, simplex, axis=0)
    delta = uvw - temp[:, d]
    bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
    # The last weight is implied by the first d summing to <= 1.
    return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def interpolate(values, vtx, wts, fill_value=np.nan):
    """Apply precomputed barycentric weights to a field.

    ``vtx`` and ``wts`` come from ``interp_weights``.  Target points with
    any negative weight lie outside the source convex hull and are set to
    ``fill_value``.
    """
    est = np.einsum('nj,nj->n', np.take(values, vtx), wts)
    outside = (wts < 0).any(axis=1)
    est[outside] = fill_value
    return est
# Use AFM/Type-1 fonts and LaTeX text rendering for publication-quality figures.
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
# Run directory holding the MITgcm output (edit to point at your run).
dir0 = '/run/media/bderembl/girtab/eddy-iwave/run13/'
#dir0 = '/home/bderembl/work/MITgcm/mitgcm_configs/eddy_iwave/run/'
# File-name globs: U/V diagnostics, surface diagnostics, and U/V/W state files.
file1 = 'diagU*'
file2 = 'diagV*'
file3 = 'diagSurf*'
file4 = 'U*'
file5 = 'V*'
file6 = 'W*'
#%==================== LOAD GRID ===================================
# Parse the MITgcm runtime namelists.
nml = f90nml.read(dir0+'data')
nmldiag = f90nml.read(dir0+'data.diagnostics')
# load grid (cell centers XC/YC, corners XG/YG, partial-cell factors hFac*,
# cell areas RA*, vertical coordinates RC/RF and spacings DRC/DRF)
XC = mit.rdmds(dir0+'XC*')
YC = mit.rdmds(dir0+'YC*')
XG = mit.rdmds(dir0+'XG*')
YG = mit.rdmds(dir0+'YG*')
hFacC = mit.rdmds(dir0+'hFacC*')
hFacS = mit.rdmds(dir0+'hFacS*')
hFacW = mit.rdmds(dir0+'hFacW*')
RAS = mit.rdmds(dir0+'RAS*')
RAW = mit.rdmds(dir0+'RAW*')
RAC = mit.rdmds(dir0+'RAC*')
RAZ = mit.rdmds(dir0+'RAZ*')
RC = mit.rdmds(dir0+'RC*')
RF = mit.rdmds(dir0+'RF*')
DRC = mit.rdmds(dir0+'DRC*')
DRF = mit.rdmds(dir0+'DRF*')
Depth = mit.rdmds(dir0+'Depth*')
# Time step and Coriolis parameter from the namelist.
dt = nml['parm03']['deltat']
f0 = nml['parm01']['f0']
# Masks: NaN wherever the cell is not fully open (hFac != 1), 1.0 elsewhere.
hFacC2 = np.where(hFacC != 1, np.nan,1.0)
hFacS2 = np.where(hFacS != 1, np.nan,1.0)
hFacW2 = np.where(hFacW != 1, np.nan,1.0)
# Level index closest to the shallowest water-column depth (RC is negative).
# NOTE(review): presumably the topography-top index -- confirm intent.
iz = np.argmin(np.abs(RC+np.min(Depth)))
si_z,si_y,si_x = hFacC.shape
# Domain size and grid spacing inferred from cell-center coordinates;
# assumes a uniform grid whose first center sits at dx/2 -- TODO confirm.
Lx = XC[-1,-1] + XC[0,0]
Ly = YC[-1,-1] + YC[0,0]
dx = 2*XC[0,0]
dy = 2*YC[0,0]
# (x, y) coordinate lists for the four C-grid point types:
# corners (g), U points, V points and cell centers (c).
xy_g = np.vstack((XG.flatten(), YG.flatten())).T
xy_u = np.vstack((XG.flatten(), YC.flatten())).T
xy_v = np.vstack((XC.flatten(), YG.flatten())).T
xy_c = np.vstack((XC.flatten(), YC.flatten())).T
# Available output iterations for the diagnostics and the state files.
iters1 = mit.mds.scanforfiles(dir0 + file1)
iters4 = mit.mds.scanforfiles(dir0 + file4)
# ==== eddy parameters (cf. mygendata) ========
x_c = Lx/2    # eddy center: middle of the domain
y_c = Ly/2
R0 = 14e3     # eddy radius [m]
velmax = 0.1  # peak azimuthal velocity [m/s]
# Polar grid (radius rr, angle theta) used for azimuthal averaging.
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the drop-in replacement.
rr = np.linspace(0.0,0.5*Lx, int(0.5*si_x)+1)
theta = np.linspace(0.0,2*np.pi, int(np.pi*Lx/dx))
theta = theta[:-1]  # drop the duplicate angle at 2*pi
rg,tg = np.meshgrid(rr,theta)
si_t,si_r = rg.shape
dr = rr[1] - rr[0]
# NOTE(review): rr[:-1] + rr[1:] is TWICE the cell-interface midpoint radius;
# check whether a factor 0.5 is intended where rr2 is used (stressdiv2).
rr2 = rr[:-1] + rr[1:]
# Cartesian coordinates of the polar-grid points, centered on the eddy.
x_rt = rg*np.cos(tg) + x_c
y_rt = rg*np.sin(tg) + y_c
xy_rt = np.vstack((x_rt.flatten(), y_rt.flatten())).T
# Precompute interpolation weights from each C-grid point type onto the
# polar grid (reused for every field and level below).
vtx_g, wts_g = interp_weights(xy_g, xy_rt)
vtx_u, wts_u = interp_weights(xy_u, xy_rt)
vtx_v, wts_v = interp_weights(xy_v, xy_rt)
vtx_c, wts_c = interp_weights(xy_c, xy_rt)
# grid at U,V,T points: radius and azimuth of every model grid point
# relative to the eddy center (x_c, y_c), for each C-grid point type:
# gg = cell corners, cc = cell centers, gu = U points, gv = V points.
rad_gg = np.sqrt((XG-x_c)**2 + (YG-y_c)**2)
rad_cc = np.sqrt((XC-x_c)**2 + (YC-y_c)**2)
rad_gu = np.sqrt((XG-x_c)**2 + (YC-y_c)**2)
rad_gv = np.sqrt((XC-x_c)**2 + (YG-y_c)**2)
# Corresponding azimuthal angles in (-pi, pi].
theta_gg = np.arctan2(YG-y_c,XG-x_c)
theta_cc = np.arctan2(YC-y_c,XC-x_c)
theta_gu = np.arctan2(YC-y_c,XG-x_c)
theta_gv = np.arctan2(YG-y_c,XC-x_c)
# vortex
def vel_rankine(rr, velmax=0.1, R0=14e3):
    """Azimuthal velocity of the model eddy at radius ``rr`` [m].

    Generalized so the eddy parameters can be overridden; the defaults
    match the values set in this script (cf. mygendata).

    Parameters
    ----------
    rr : float or ndarray -- radius from the eddy center [m]
    velmax : float -- peak azimuthal speed [m/s]
    R0 : float -- eddy radius [m]
    """
    # Normalization so that |v| equals velmax at rr == R0.
    norm = np.tanh(1.0)/(np.cosh(1.0))**2
    v = -velmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/norm
    # Guard the origin explicitly (also avoids a signed -0.0 there).
    v = np.where(rr == 0, 0.0, v)
    return v
#%==================== LOAD FIELDS ===================================
# Use the second available output record.
i = 1
# rec=1 of the U/V diagnostics files -- presumably the vertical viscous
# flux diagnostic (differenced below into a tendency); TODO confirm against
# data.diagnostics.
udissv = mit.rdmds(dir0 + file1,iters1[i],rec=1)
vdissv = mit.rdmds(dir0 + file2,iters1[i],rec=1)
# Instantaneous velocity state.
uvel = mit.rdmds(dir0 + file4,iters4[i])
vvel = mit.rdmds(dir0 + file5,iters4[i])
wvel = mit.rdmds(dir0 + file6,iters4[i])
#uvel0 = mit.rdmds(dir0 + file4,iters4[i-1])
#vvel0 = mit.rdmds(dir0 + file5,iters4[i-1])
# Azimuthal means on the (depth, radius) grid: radial (ur) and azimuthal
# (ut) velocity at cell centers, the same at vertical cell faces (*_me1),
# vertical velocity at faces, and the dissipation terms.
ur_me = np.zeros((si_z,si_r))
ut_me = np.zeros((si_z,si_r))
ur_me1 = np.zeros((si_z,si_r))
ut_me1 = np.zeros((si_z,si_r))
w_me1 = np.zeros((si_z,si_r))
#ut0_me = np.zeros((si_z,si_r))
urdissv_me = np.zeros((si_z,si_r))
utdissv_me = np.zeros((si_z,si_r))
# Reynolds stresses: stress1 = -<ut' w'> at faces, stress2 = -<r ut' ur'>
# at centers.  stress3 is allocated but never filled below.
stress1 = np.zeros((si_z+1,si_r))
stress2 = np.zeros((si_z,si_r))
stress3 = np.zeros((si_z+1,si_r))
# set topography points to nans
uvel = uvel*hFacW2
vvel = vvel*hFacS2
wvel = wvel*hFacC2
# Vertical difference of the flux diagnostic, normalized by the cell
# volume (area * layer thickness * open fraction) -> tendency per cell.
for k in range(0,si_z-1):
udissv[k,:,:] = (udissv[k+1,:,:] - udissv[k,:,:])/(RAW*DRF[k]*hFacW[k,:,:])
vdissv[k,:,:] = (vdissv[k+1,:,:] - vdissv[k,:,:])/(RAS*DRF[k]*hFacS[k,:,:])
# No flux difference available for the bottom level.
udissv[si_z-1,:,:] = 0.0
vdissv[si_z-1,:,:] = 0.0
# Level-by-level: interpolate the Cartesian fields onto the polar grid,
# rotate to (radial, azimuthal) components, then average over azimuth.
for k in range(0,si_z-1):
uvel_pol = interpolate(uvel[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vvel_pol = interpolate(vvel[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
uvel_pol1 = interpolate(uvel[k+1,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vvel_pol1 = interpolate(vvel[k+1,:,:], vtx_v, wts_v).reshape((si_t,si_r))
wvel_pol1 = interpolate(wvel[k+1,:,:], vtx_c, wts_c).reshape((si_t,si_r))
udissv_pol = interpolate(udissv[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vdissv_pol = interpolate(vdissv[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
# u and v at vertical cell face
uvel_pol1 = 0.5*(uvel_pol + uvel_pol1)
vvel_pol1 = 0.5*(vvel_pol + vvel_pol1)
# Rotate Cartesian (u, v) into radial/azimuthal components.
ur = np.cos(tg)*uvel_pol + np.sin(tg)*vvel_pol
ut = -np.sin(tg)*uvel_pol + np.cos(tg)*vvel_pol
ur1 = np.cos(tg)*uvel_pol1 + np.sin(tg)*vvel_pol1
ut1 = -np.sin(tg)*uvel_pol1 + np.cos(tg)*vvel_pol1
urdissv = np.cos(tg)*udissv_pol + np.sin(tg)*vdissv_pol
utdissv = -np.sin(tg)*udissv_pol + np.cos(tg)*vdissv_pol
# Azimuthal averages (NaN-aware: masked topography points are ignored).
ur_me[k,:] = np.nanmean(ur,axis=0)
ut_me[k,:] = np.nanmean(ut,axis=0)
ur_me1[k,:] = np.nanmean(ur1,axis=0)
ut_me1[k,:] = np.nanmean(ut1,axis=0)
w_me1 [k,:] = np.nanmean(wvel_pol1,axis=0)
urdissv_me[k,:] = np.nanmean(urdissv,axis=0)
utdissv_me[k,:] = np.nanmean(utdissv,axis=0)
# uvel_pol = interpolate(uvel0[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
# vvel_pol = interpolate(vvel0[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
# ut0 = -np.sin(tg)*uvel_pol + np.cos(tg)*vvel_pol
# ut0_me[k,:] = np.nanmean(ut0,axis=0)
# Eddy (deviation-from-mean) stresses: vertical and radial components.
stress1[k+1,:] = -np.nanmean((ut1 - ut_me1[k,:])*(wvel_pol1 - w_me1[k,:]),axis=0)
stress2[k,:] = -np.nanmean(rr.reshape((1,si_r))*(ut - ut_me[k,:])*(ur - ur_me[k,:]),axis=0)
# minus DRF because diff done downward
stressdiv1 = np.diff(stress1,1,0)/(-DRF[:,0,:])
# Radial divergence: (1/r) d(stress2)/dr on the interface radii rr2.
stressdiv2 = 1/rr2.reshape((1,si_r-1))*np.diff(stress2,1,1)/dr
stressdiv = stressdiv1[:,1:] + stressdiv2
# Vertical shear of the mean azimuthal velocity.
dutdz = np.diff(ut_me,1,0)/(-DRF[:-1,0,:])
#================ Plot part ================
# Plot a (radius, depth) field `psi` as filled contours, with contours of
# the azimuthal-mean azimuthal velocity overlaid.  Reads module globals
# rr, RC and ut_me.  kwargs: vmax -- symmetric color limit (default
# max|psi|); title -- figure title.  *args is accepted but unused.
def rzplot(psi,*args, **kwargs):
vmax = np.max(np.abs((psi)))
vmax = kwargs.get('vmax', vmax)
vmin = -vmax
# Clip to the color range so values beyond it land in the 'extend' arrows.
psi = np.where(psi<vmin,vmin,psi)
psi = np.where(psi>vmax,vmax,psi)
title = kwargs.get('title',None)
plt.figure()
# Radius in km on x, depth (RC, negative) on y; diverging colormap.
plt.contourf(rr*1e-3,RC[:,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax,extend='both')
plt.colorbar(format='%.0e')
# Overlay mean azimuthal velocity contours for orientation.
plt.contour(rr*1e-3,RC[:,0,0],ut_me,np.linspace(-0.2,0.2,17),colors='k',linewidths=0.5)
plt.xlabel('r (km)')
plt.ylabel('z (m)')
plt.title(title)
# Common symmetric color limit for the momentum-budget terms [m/s^2].
vmaxall = 3e-7
# Coriolis term of the azimuthal momentum budget.
psi = -f0*ur_me
rzplot(psi,title=r"$-fU_r$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('ucori.png',bbox_inches='tight')
# psi = rr*ut_me + 0.5*f0*rr**2
# rzplot(psi,title=r"$\lambda$ (m$^2$\,s$^{-1}$)")
# #psi = (ut_me-vel_rankine(rr))/(iters4[1]*dt)
# psi = (ut_me-ut0_me)/((iters4[1]-iters4[0])*dt)
# rzplot(psi,title=r"$du_\theta/dt$ (m\,s$^{-2}$)",vmax=vmaxall)
# Vertical eddy-stress divergence.
psi = stressdiv1
rzplot(psi,title=r"$\partial \overline{u'_\theta w'}/\partial z$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('dupwpdz.png',bbox_inches='tight')
# Azimuthal-mean vertical dissipation term.
psi = utdissv_me
rzplot(psi,title=r"$\nu d^2 u_\theta/dz^2$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('uvisc.png',bbox_inches='tight')
# vmin = -3e-7
# vmax = -vmin
# psi = stressdiv1[:iz-1,:]
# psi = np.where(psi<vmin,vmin,psi)
# psi = np.where(psi>vmax,vmax,psi)
# plt.figure()
# plt.contourf(rr*1e-3,RC[:iz-1,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax)
# plt.colorbar(format='%.0e',label='m/s2')
# plt.contour(rr*1e-3,RC[:,0,0],ut_me,5,colors='k',linewidths=0.5)
# plt.xlabel('r (km)')
# plt.ylabel('z (m)')
# plt.title(r"$\partial \overline{u'_\theta w'}/\partial z$")
# vmin = -3e-7
# vmax = -vmin
# #psi = (ut_me[:iz-1,:]-vel_rankine(rr))/(iters1[1]*dt)
# #psi = -(uvel[:iz-1,499:,500]-uvel0[:iz-1,499:,500])/(iters1[1]*dt)
# #psi = -(udiss[:iz-1,499:,500])
# plt.figure()
# plt.contourf(rr*1e-3,RC[:iz-1,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax,extend='both')
# plt.colorbar(format='%.0e',label='m/s2')
# plt.contour(rr*1e-3,RC[:,0,0],ut_me,5,colors='k',linewidths=0.5)
# plt.xlabel('r (km)')
# plt.ylabel('z (m)')
# plt.title(r"$\nu d^2 u_\theta/dz^2$")
# # fit
# # itaudrag = stressdiv1[:iz-1,:]/ut_me[:iz-1,:]
# # # mean over the eddy
# # itaudrag_me = itaudrag[:,50:250].mean(axis=1)
# # #plt.contourf(rr*1e-3,RC[:iz-1,0,0],itaudrag,100,cmap=plt.cm.seismic)
# # #plt.colorbar(format='%.0e',label='1/s')
# # plt.figure();
# # plt.plot(RC[:iz-1,0,0],1/(itaudrag_me*86400))
# # # fit
# # plt.plot(RC[:iz-1,0,0],1/(-0.3*(np.exp((-RC[:iz-1,0,0]-3800)/100)-1.6e-5*RC[:iz-1,0,0])))
# d2udz2 = np.diff(np.diff(uvel,1,0),1,0)/DRF[1:-1]**2
# nu = 1e-2
# plt.figure()
# plt.pcolormesh(nu*d2udz2[:iz-1,499:,500],cmap=plt.cm.seismic,vmin=vmin,vmax=vmax)
# plt.colorbar(format='%.0e',label='m/s2')
| mit |
cuttlefishh/papers | red-sea-single-cell-genomes/code/make_rarefaction_plots_tara.py | 1 | 11809 | #!/usr/bin/env python
import click
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
# Function: Randomize columns order of pandas DataFrame
def randomize_df_column_order(df):
    """Return *df* with its columns shuffled into a random order.

    Uses NumPy's global random state; the row data within each column is
    untouched.
    """
    shuffled = df.columns.tolist()
    np.random.shuffle(shuffled)
    return df[shuffled]
# Function: Count cumulative true values in each row by column
def make_cumulative_frame(df):
    """Cumulative left-to-right sum across columns.

    Column i of the result holds the element-wise sum of the first i+1
    columns of *df* (per row), i.e. a running total in column order.
    """
    running = pd.Series(data=0, index=df.index)
    out = pd.DataFrame(data=0, index=df.index, columns=df.columns)
    for col in df.columns:
        running = running + df[col]
        out[col] = running
    return out
# Function: Find best (greatest) cumulative sum
def find_best_cumulative_frame(df):
    """Greedy best-case sample ordering for a rarefaction curve.

    At each step, pick the remaining column that maximizes the number of
    rows with a positive running total (ties broken by first occurrence),
    append that running total as the next output column, and drop the
    column from the pool.  The result's last column is the overall total.
    """
    running = pd.Series(data=0, index=df.index)
    chosen = []
    pool = pd.DataFrame(data=df.values, index=df.index, columns=df.columns)
    for _ in range(len(pool.columns)):
        # Candidate running totals if each remaining column were taken next.
        candidates = pool.add(running.tolist(), axis=0)
        coverage = (candidates > 0).sum()
        pick = coverage.idxmax()
        running = candidates[pick]
        chosen.append(running)
        pool.pop(pick)
    return pd.concat(chosen, axis=1)
# Function: Find worst (least) cumulative sum
# Greedy worst-case sample ordering: mirror of find_best_cumulative_frame,
# but at each step takes the remaining column that yields the FEWEST rows
# with a positive running total.
def find_worst_cumulative_frame(df):
# initialize values
prev_count = pd.Series(data=0, index=df.index)
col_list = []
dfb = pd.DataFrame(data=df.values, index=df.index, columns=df.columns)
# go through selection for as many samples exist
for sample in dfb.columns:
# add prev_count to dfb
dfc = dfb.add(prev_count.tolist(), axis=0)
# find column with fewest values > 0
dfd = dfc > 0
num_gt_zero = dfd.sum()
worst = num_gt_zero.idxmin()
# NOTE(review): worst_value is computed but never used.
worst_value = num_gt_zero.min()
# set worst column as new prev_count and make it last column in new df
prev_count = dfc[worst]
col_list.append(prev_count)
# remove column from dfb
dfb.pop(worst)
# make new df
cumulative_frame = pd.concat(col_list, axis=1)
return cumulative_frame
# CLI entry point: builds a rarefaction curve of gene-cluster presence as
# Tara samples are accumulated, with best/worst-case envelopes, a
# random-permutation mean, and optionally a curve with the six Red Sea
# samples added last.  Outputs: cum_*.pdf, missing_*.txt, cum_sum_*.csv.
@click.command()
@click.option('--num_samples', required=True, type=click.INT, help='Number of samples, e.g. 139 for all prokaryotic samples')
@click.option('--species', required=True, type=click.STRING, help='species (pelag or proch) corresponding to cluster counts input files, e.g. pelag_004_SRF_0.22-1.6_1e-5')
@click.option('--evalue', required=True, type=click.STRING, help='e-value cutoff corresponding to cluster counts input files, e.g. pelag_004_SRF_0.22-1.6_1e-5')
@click.option('--clusters_set', required=True, type=click.STRING, help='Clusters set that is given by --clusters_path, e.g. RSonly or all')
@click.option('--clusters_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Clusters file to check for presence, e.g. ~/singlecell/clusters/orthomcl-sar4/groups.RSonly_sar.list')
@click.option('--permutations', required=True, type=int, help='Number of rarefaction curve permutations to draw')
@click.option('--plot_redsea', is_flag=True, help='Provide this flag if you want to superimpose rarafaction curve where Red Sea Tara samples are explored first')
# Example invocation values:
# num_samples = 139
# species = 'proch'
# evalue = '1e-5'
# clusters_set = 'all'
# clusters_path = '~/singlecell/clusters/orthomcl-pro4/groups.all_pro.list'
# permutations = 100
# Main function
def rarefaction(num_samples, species, evalue, clusters_set, clusters_path, permutations, plot_redsea):
"""Make rarefaction curve for presence of gene clusters in Tara samples"""
# Paths of input files, containing cluster counts
# NOTE(review): pd.Series.from_csv / pd.DataFrame.from_csv were removed in
# pandas 1.0 -- this script requires an older pandas (or a port to read_csv).
paths = pd.Series.from_csv('~/singlecell/tara/paths_%s_%s.list' % (species, evalue), header=-1, sep='\t', index_col=None)
# Data frame containing all samples cluster counts (NaN if missing)
pieces = []
for path in paths:
fullpath = "~/singlecell/tara/PROK-139/%s" % path
counts = pd.DataFrame.from_csv(fullpath, header=-1, sep='\t', index_col=0)
pieces.append(counts)
frame = pd.concat(pieces, axis=1)
headings = paths.tolist()
frame.columns = headings
# Dataframe of clusters
clusters = pd.Series.from_csv(clusters_path, header=-1, sep='\t', index_col=None)
clusters_frame = frame.loc[clusters]
# Check if counts are greater than zero (presence/absence)
bool_frame = clusters_frame > 0
# Plot best and worst curves
cum_frame_best = find_best_cumulative_frame(bool_frame)
cum_bool_best = cum_frame_best > 0
cum_sum_best = cum_bool_best.sum()
num_samples_plus_one = num_samples + 1
plt.plot(range(1, num_samples_plus_one), cum_sum_best, color='b', label='Best-case scenario')
cum_frame_worst = find_worst_cumulative_frame(bool_frame)
cum_bool_worst = cum_frame_worst > 0
cum_sum_worst = cum_bool_worst.sum()
plt.plot(range(1, num_samples_plus_one), cum_sum_worst, color='b', label='Worst-case scenario')
plt.fill_between(range(1, num_samples_plus_one), cum_sum_best, cum_sum_worst, facecolor='blue', alpha=0.3, label=None)
# Save list of clusters not found
final_result = cum_bool_best.iloc[0:,-1:]
genes_not_found = final_result.loc[final_result.all(axis=1) == False].index.values
np.savetxt('missing_%s_%s_%s.txt' % (species, clusters_set, evalue), genes_not_found, fmt="%s", delimiter=',')
# Plot rarefaction curve of avg +/- std of N random sample orders
cum_list = []
for i in range(0, permutations):
# Randomize sample columns (rarefaction step)
bool_frame_copy = randomize_df_column_order(bool_frame)
# Replace column names with sequential numbers (prevents inconsistent plotting)
bool_frame_copy.columns = range(0, len(bool_frame_copy.columns))
# Go thru randomized columns and count cumulative times clusters found
cumulative_frame = make_cumulative_frame(bool_frame_copy)
# Check if cumulative count is greater than zero (presence/absence)
cumulative_bool = cumulative_frame > 0
# Sum cumulative count
cumulative_sum = cumulative_bool.sum()
# Add cumulutive sum to list
cum_list.append(cumulative_sum)
# Make new df
cum_frame = pd.concat(cum_list, axis=1)
# Calc avg and std of df
cum_mean = cum_frame.mean(axis=1)
cum_std = cum_frame.std(axis=1)
# Plot
plt.plot(range(1, num_samples_plus_one), cum_mean, color='yellow', linestyle='--', label='Mean of cumulative gene counts')
plt.plot(range(1, num_samples_plus_one), cum_mean + cum_std, color='yellow', label=' plus standard deviation')
plt.plot(range(1, num_samples_plus_one), cum_mean - cum_std, color='yellow', label=' minus standard deviation')
plt.fill_between(range(1, num_samples_plus_one), cum_mean + cum_std, cum_mean - cum_std, facecolor='yellow', alpha=0.5)
# Plot rarefaction curve for RS samples
if plot_redsea:
# The six Red Sea Tara samples; forced to be explored last so the curve
# shows how many clusters they add on top of all other samples.
redsea = ['031_SRF_0.22-1.6', '032_SRF_0.22-1.6', '032_DCM_0.22-1.6', '033_SRF_0.22-1.6', '034_SRF_0.22-1.6', '034_DCM_0.22-1.6']
redsea = [species + '_' + s + '_' + evalue for s in redsea]
cum_list = []
for i in range(0, permutations):
last_six_columns = []
last_six_columns.append([str(w) for w in random.sample(redsea, len(redsea))])
#last_six_columns = random.shuffle(redsea) <== doesn't work
bool_frame_copy = randomize_df_column_order(bool_frame)
old_columns = bool_frame_copy.columns
nonrs_columns = old_columns.drop(last_six_columns[0])
new_columns = nonrs_columns.tolist() + last_six_columns[0]
new_frame = pd.DataFrame(data=bool_frame_copy, columns=new_columns)
# Replace column names with sequential numbers (prevents inconsistent plotting)
new_frame.columns = range(0, len(new_frame.columns))
# Go thru randomized columns and count cumulative times clusters found
cumulative_frame = make_cumulative_frame(new_frame)
# Check if cumulative count is greater than zero (presence/absence)
cumulative_bool = cumulative_frame > 0
# Sum cumulative count
cumulative_sum = cumulative_bool.sum()
# Add cumulutive sum to list
cum_list.append(cumulative_sum)
# If we want to know how many clusters are added by last six samples...
# print cum_list
# Make new df
cum_frame = pd.concat(cum_list, axis=1)
# Calc avg and std of df
cum_mean = cum_frame.mean(axis=1)
cum_std = cum_frame.std(axis=1)
# Plot
plt.plot(range(1, num_samples_plus_one), cum_mean, color='red', linestyle='-', label='Mean, 6 Red Sea samples last')
#plt.plot(range(1, num_samples_plus_one), cum_mean + cum_std, color='red', label=' plus standard deviation')
#plt.plot(range(1, num_samples_plus_one), cum_mean - cum_std, color='red', label=' minus standard deviation')
#plt.fill_between(range(1, num_samples_plus_one), cum_mean + cum_std, cum_mean - cum_std, facecolor='red', alpha=0.5)
plt.annotate('Red Sea samples added', xy=(num_samples-6, cum_mean[num_samples-6]), xytext=(num_samples-20, cum_mean[num_samples-1]*1.05), horizontalalignment='right', arrowprops=dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=90,rad=10"))
# Plot number of clusters as horizontal line
num_genes = len(clusters)
plt.axhline(y=num_genes, color='k', linestyle='--', label='Total clusters (%s)' % num_genes)
#plt.text(22, num_genes*0.95, 'Number %s clusters (%s)' % (clusters_set, num_genes))
found_genes = cumulative_sum.max()
#plt.text(22, found_genes*1.05, 'Found %s clusters (%s)' % (clusters_set, found_genes))
plt.annotate('', xy=(num_samples, found_genes), xytext=(num_samples, num_genes), arrowprops={'arrowstyle': '<->'})
diff_genes = num_genes - found_genes
plt.text(5, (num_genes+found_genes)/2, 'Clusters found: %s' % (found_genes))
plt.text(num_samples-3, (num_genes+found_genes)/2, 'Not found: %s' % (diff_genes), horizontalalignment='right')
# Add the legend with some customizations
legend = plt.legend(loc='lower right', shadow=True)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set label fontsize
for label in legend.get_texts():
label.set_fontsize('medium')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
# Format and save plot
plt.xlabel('Number of Tara samples added')
plt.ylabel('Number of %s clusters found (e-value: %s)' % (clusters_set, evalue))
plt.title('Rarefaction curve: %s (e-value: %s)' % (species, evalue))
#plt.grid(TRUE)
# Round the y-limit up to the next multiple of 100.
ymax = num_genes - (num_genes % 100) + 100
plt.axis([0, num_samples+10, 0, ymax])
# if species == 'proch':
# plt.axis([0, num_samples+10, -20, ymax])
# plt.axhline(y=0, color='k')
plt.savefig('cum_%s_%s_%s.pdf' % (species, clusters_set, evalue))
# Save best and worst cumulative sum lists to csv
cum_sum_best.to_csv('cum_sum_best_%s_%s_%s.csv' % (species, clusters_set, evalue))
cum_sum_worst.to_csv('cum_sum_worst_%s_%s_%s.csv' % (species, clusters_set, evalue))
if __name__ == '__main__':
rarefaction()
| mit |
phoebe-project/phoebe2-docs | 2.0/tutorials/RV.py | 1 | 5401 | #!/usr/bin/env python
# coding: utf-8
# 'rv' Datasets and Options
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
# Jupyter-notebook export ('rv' dataset tutorial).  Converted the Python-2-only
# `print x` statements to `print(x)` calls; every print here takes a single
# argument, so the parenthesized form behaves identically under Python 2 too.
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Dataset Parameters
# --------------------------
#
# Let's create the ParameterSets which would be added to the Bundle when calling add_dataset. Later we'll call add_dataset, which will create and attach both these ParameterSets for us.
# In[3]:
ps, constraints = phoebe.dataset.rv()
print(ps)
# In[4]:
ps_dep = phoebe.dataset.rv_dep()
print(ps_dep)
# For information on these passband-dependent parameters, see the section on the [lc dataset](LC) (these are used only to compute fluxes when rv_method=='flux-weighted')
# ### times
# In[5]:
print(ps['times'])
# ### rvs
# In[6]:
print(ps['rvs'])
# ### sigmas
# In[7]:
print(ps['sigmas'])
# Compute Options
# ------------------
#
# Let's look at the compute options (for the default PHOEBE 2 backend) that relate to the RV dataset.
#
# Other compute options are covered elsewhere:
# * parameters related to dynamics are explained in the section on the [orb dataset](ORB)
# * parameters related to meshing, eclipse detection, and subdivision (used if rv_method=='flux-weighted') are explained in the section on the [mesh dataset](MESH)
# * parameters related to computing fluxes (used if rv_method=='flux-weighted') are explained in the section on the [lc dataset](LC)
# In[8]:
ps_compute = phoebe.compute.phoebe()
print(ps_compute)
# ### rv_method
# In[9]:
print(ps_compute['rv_method'])
# If rv_method is set to 'dynamical' then the computed radial velocities are simply the z-velocities of the centers of mass of each component. In this case, only the dynamical options are relevant. For more details on these, see the section on the [orb dataset](ORB).
#
# If rv_method is set to 'flux-weighted' then radial velocities are determined by the z-velocity of each visible surface element of the mesh, weighted by their respective intensities. Since the stars are placed in their orbits by the dynamic options, the section on the [orb dataset](ORB) is still applicable. So are the meshing options described in [mesh dataset](MESH) and the options for computing fluxes in [lc dataset](LC).
# ### rv_grav
# In[10]:
print(ps_compute['rv_grav'])
# See the [Gravitational Redshift Example Script](../examples/grav_redshift) for more details on the influence this parameter has on radial velocities.
# Synthetics
# ------------------
# In[11]:
b.add_dataset('rv', times=np.linspace(0,1,101), dataset='rv01')
# In[12]:
b.run_compute(irrad_method='none')
# In[13]:
b['rv@model'].twigs
# In[14]:
print(b['times@primary@rv@model'])
# In[15]:
print(b['rvs@primary@rv@model'])
# Plotting
# ---------------
#
# By default, RV datasets plot as 'rvs' vs 'times'.
# In[16]:
axs, artists = b['rv@model'].plot()
# Since these are the only two columns available in the synthetic model, the only other options is to plot in phase instead of time.
# In[17]:
axs, artists = b['rv@model'].plot(x='phases')
# In system hierarchies where there may be multiple periods, it is also possible to determine whose period to use for phasing.
# In[18]:
b['period'].components
# In[19]:
axs, artists = b['rv@model'].plot(x='phases:binary')
# Mesh Fields
# ---------------------
#
# If a mesh dataset exists at any of the same times as the time array in the rv dataset, *or* if pbmesh is set to True in the compute options, then radial velocities for each surface element will be available in the model as well (only if mesh_method=='flux_weighted').
#
# Since the radial velocities are flux-weighted, the flux-related quantities are also included. For a description of these, see the section on the [lc dataset](LC).
#
# Let's add a single mesh at the first time of the rv dataset and re-call run_compute
# In[20]:
b.add_dataset('mesh', times=[0], dataset='mesh01')
# In[21]:
b.run_compute(irrad_method='none')
# In[22]:
print(b['model'].datasets)
# These new columns are stored with the rv's dataset tag, but with the mesh model-kind.
# In[23]:
b.filter(dataset='rv01', kind='mesh', context='model').twigs
# Any of these columns are then available to use as edge or facecolors when plotting the mesh (see the section on the [MESH dataset](MESH)), but since the mesh elements are stored with the 'mesh01' dataset tag, and the rv (including flux-related) quantities are stored with the 'rv01' dataset tag, it is important not to provide the 'mesh01' dataset tag before plotting.
# In[24]:
axs, artists = b['mesh@model'].plot(facecolor='rvs', edgecolor=None)
# NOT:
# axs, artists = b['mesh01@model'].plot(facecolor='rvs', edgecolor=None)
# ### rvs
# In[25]:
print(b['rvs@primary@rv01@mesh@model'])
| gpl-3.0 |
dpaiton/OpenPV | pv-core/analysis/python/plot_fourier_kcluster.py | 1 | 20158 | """
Plots the k-means clustering
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
import radialProfile
import pylab as py
if len(sys.argv) < 3:
print "usage: kclustering filename on, filename off, k"
print len(sys.argv)
sys.exit()
w = rw.PVReadWeights(sys.argv[1])
wOff = rw.PVReadWeights(sys.argv[2])
space = 1
d = np.zeros((4,4))
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 20
marginstart = margin
marginend = nx - margin
acount = 0
if len(sys.argv) == 3:
def format_coord(x, y):
    """Status-bar formatter for a matplotlib axes: report the cursor
    position in patch coordinates and, when inside the image, the value
    under the cursor.

    NOTE(review): relies on module-level names ``P``, ``numcols`` and
    ``numrows`` which are never defined in this script -- confirm before
    wiring this into a figure.
    """
    col = int(x + 0.5)
    row = int(y + 0.5)
    x2 = x / 16.0
    y2 = y / 16.0
    x = x / 4.0
    y = y / 4.0
    if 0 <= col < numcols and 0 <= row < numrows:
        z = P[row, col]
        return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
    return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d' % (int(x), int(y), int(x2), int(y2))
k = 16
for ko in range(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
poff = wOff.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
acount = acount + 1
if kxOn == margin + 1 and kyOn == margin + 1:
don = p
doff = poff
d = np.append(don, doff)
else:
don = p
doff = poff
e = np.append(don, doff)
d = np.vstack((d,e))
wd = sp.whiten(d)
result = sp.kmeans2(wd, k)
cluster = result[1]
nx_im = 2 * (nxp + space) + space
ny_im = k * (nyp + space) + space
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
nx_im2 = nx * (nxp)
ny_im2 = ny * (nyp)
im2 = np.zeros((nx_im2, ny_im2))
im2[:,:] = (w.max - w.min) / 2.
nx_im3 = nx * (nxp)
ny_im3 = ny * (nyp)
im3 = np.zeros((nx_im3, ny_im3))
im3[:,:] = (w.max - w.min) / 2.
b = result[0]
c = np.hsplit(b, 2)
con = c[0]
coff = c[1]
for i in range(k):
d = con[i].reshape(nxp, nyp)
numrows, numcols = d.shape
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im[y:y+nyp, x:x+nxp] = d
for i in range(k):
e = coff[i].reshape(nxp, nyp)
numrows, numcols = e.shape
i = i + k
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im[y:y+nyp, x:x+nxp] = e
kcount1 = 0.0
kcount2 = 0.0
kcount3 = 0.0
kcount4 = 0.0
kcount5 = 0.0
kcount6 = 0.0
kcount7 = 0.0
kcount8 = 0.0
kcount9 = 0.0
kcount10 = 0.0
kcount11 = 0.0
kcount12 = 0.0
kcount13 = 0.0
kcount14= 0.0
kcount15 = 0.0
kcount16 = 0.0
for i in range(acount):
if cluster[i] == 0:
kcount1 = kcount1 + 1
if cluster[i] == 1:
kcount2 = kcount2 + 1
if cluster[i] == 2:
kcount3 = kcount3 + 1
if cluster[i] == 3:
kcount4 = kcount4 + 1
if cluster[i] == 4:
kcount5 = kcount5 + 1
if cluster[i] == 5:
kcount6 = kcount6 + 1
if cluster[i] == 6:
kcount7 = kcount7 + 1
if cluster[i] == 7:
kcount8 = kcount8 + 1
if cluster[i] == 8:
kcount9 = kcount9 + 1
if cluster[i] == 9:
kcount10 = kcount10 + 1
if cluster[i] == 10:
kcount11 = kcount11 + 1
if cluster[i] == 11:
kcount12 = kcount12 + 1
if cluster[i] == 12:
kcount13 = kcount13 + 1
if cluster[i] == 13:
kcount14 = kcount14 + 1
if cluster[i] == 14:
kcount15 = kcount15 + 1
if cluster[i] == 15:
kcount16 = kcount16 + 1
kcountper1 = kcount1 / acount
kcountper2 = kcount2 / acount
kcountper3 = kcount3 / acount
kcountper4 = kcount4 / acount
kcountper5 = kcount5 / acount
kcountper6 = kcount6 / acount
kcountper7 = kcount7 / acount
kcountper8 = kcount8 / acount
kcountper9 = kcount9 / acount
kcountper10 = kcount10 / acount
kcountper11 = kcount11 / acount
kcountper12 = kcount12 / acount
kcountper13 = kcount13 / acount
kcountper14 = kcount14 / acount
kcountper15 = kcount15 / acount
kcountper16 = kcount16 / acount
fig = plt.figure()
ax = fig.add_subplot(111)
textx = (-7/16.0) * k
texty = (10/16.0) * k
ax.set_title('On and Off K-means')
ax.set_axis_off()
ax.text(textx, texty,'ON\n\nOff', fontsize='xx-large', rotation='horizontal')
ax.text( -5, 12, "Percent %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" %(kcountper1, kcountper2, kcountper3, kcountper4, kcountper5, kcountper6, kcountper7, kcountper8, kcountper9, kcountper10, kcountper11, kcountper12, kcountper13, kcountper14, kcountper15, kcountper16), fontsize='large', rotation='horizontal')
ax.text(-4, 14, "Patch 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", fontsize='x-large', rotation='horizontal')
ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
plt.show()
vertk = input('Which k-clusters are vertical: ')
horik = input('Which k-clusters are horizontal: ')
vertk = np.array(vertk)
horik = np.array(horik)
vertk = vertk - 1
horik = horik - 1
fall = []
for i in range(16):
fall = np.append(fall, i)
vh = np.append(vertk, horik)
vhcount = 0
for j in range(k):
for i in range(len(vh)):
if vh[i] == j:
fall = np.delete(fall, (j-vhcount))
vhcount += 1
break
print "fall = ", fall
print "vh = ", vh
leng = math.sqrt(len(cluster))
im = np.zeros((leng, leng))
vcount = 0
hcount = 0
fcount = 0
for o in range(k):
for i in range(len(vertk)):
if vertk[i] == o:
count = 0
d = 0
w.rewind()
for ko in np.arange(numpat):
kx = conv.kxPos(ko, nx, ny, nf)
ky = conv.kyPos(ko, nx, ny, nf)
if marginstart < kx < marginend:
if marginstart < ky < marginend:
if cluster[count] == o:
e = 0
count = count + 1
vcount+=1
im[kx-margin-1, ky-margin-1] = e
else:
count = count + 1
for i in range(len(horik)):
if horik[i] == o:
count = 0
d = 0
w.rewind()
for ko in np.arange(numpat):
kx = conv.kxPos(ko, nx, ny, nf)
ky = conv.kyPos(ko, nx, ny, nf)
if marginstart < kx < marginend:
if marginstart < ky < marginend:
if cluster[count] == o:
e = 1
count = count + 1
hcount+=1
im[kx-margin-1, ky-margin-1] = e
else:
count = count + 1
for i in range(len(fall)):
if fall[i] == o:
count = 0
d = 0
w.rewind()
for ko in np.arange(numpat):
kx = conv.kxPos(ko, nx, ny, nf)
ky = conv.kyPos(ko, nx, ny, nf)
if marginstart < kx < marginend:
if marginstart < ky < marginend:
if cluster[count] == o:
e = 0.5
count = count + 1
fcount+=1
im[kx-margin-1, ky-margin-1] = e
else:
count = count + 1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im)
plt.show()
print "Num of Vertical = ", vcount
print "Num of Horizontal = ", hcount
print "Num of Neither = ", fcount
F1 = np.fft.fft2(im)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(im), cmap=py.cm.Greys)
ax.set_ylabel('Vertical=%d\n Horizontal=%d\nNeither=%d' %(vcount, hcount, fcount), rotation = 'horizontal')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
ax3.set_xlabel('Spatial Frequency')
ax3.set_ylabel('Power Spectrum')
plt.show()
sys.exit()
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count2+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im2 = np.append(im2, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count3+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im3 = np.append(im3, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count4+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im4 = np.append(im4, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count5+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im5 = np.append(im5, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count6+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im6 = np.append(im6, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count7+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im7 = np.append(im7, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count8+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im8 = np.append(im8, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count9+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im9 = np.append(im9, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count10+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im10 = np.append(im10, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count11+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im11 = np.append(im11, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count12+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im12 = np.append(im12, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count13+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im13 = np.append(im13, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count14+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im14 = np.append(im14, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count15+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im15 = np.append(im15, e)
feature += 1
#######
cf = np.matrix(con[feature])
count = 0
d = np.zeros((nxp,nyp))
d = 0
w.rewind()
for ko in np.arange(numpat):
kxOn = conv.kxPos(ko, nx, ny, nf)
kyOn = conv.kyPos(ko, nx, ny, nf)
p = w.next_patch()
if marginstart < kxOn < marginend:
if marginstart < kyOn < marginend:
if cluster[count] == feature:
e = np.matrix(p)
e = e.reshape(16, 1)
e = cf * e
count = count + 1
count16+=1
else:
e = d
count = count + 1
else:
e = d
else:
e = d
im16 = np.append(im16, e)
feature += 1
numpat = float(numpat)
im1 = (im1 * count1) / numpat
im2 = (im2 * count2) / numpat
im3 = (im3 * count3) / numpat
im4 = (im4 * count4) / numpat
im5 = (im5 * count5) / numpat
im6 = (im6 * count6) / numpat
im7 = (im7 * count7) / numpat
im8 = (im8 * count8) / numpat
im9 = (im9 * count9) / numpat
im10 = (im10 * count10) / numpat
im11 = (im11 * count11) / numpat
im12 = (im12 * count12) / numpat
im13 = (im13 * count13) / numpat
im14 = (im14 * count14) / numpat
im15 = (im15 * count15) / numpat
im16 = (im16 * count16) / numpat
totalim = im1
totalim = np.vstack((totalim, im2))
totalim = np.vstack((totalim, im3))
totalim = np.vstack((totalim, im4))
totalim = np.vstack((totalim, im5))
totalim = np.vstack((totalim, im6))
totalim = np.vstack((totalim, im7))
totalim = np.vstack((totalim, im8))
totalim = np.vstack((totalim, im9))
totalim = np.vstack((totalim, im10))
totalim = np.vstack((totalim, im11))
totalim = np.vstack((totalim, im12))
totalim = np.vstack((totalim, im13))
totalim = np.vstack((totalim, im14))
totalim = np.vstack((totalim, im15))
totalim = np.vstack((totalim, im16))
totalim = np.average(totalim, axis=0)
print "shape = ", np.shape(totalim)
totalim = np.reshape(totalim, (128, 128))
F1 = np.fft.fft2(totalim)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(totalim), cmap=py.cm.Greys)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
ax3.set_xlabel('Spatial Frequency')
ax3.set_ylabel('Power Spectrum')
plt.show()
sys.exit()
if 1 == 1:
cluster = cluster + 1
leng = np.shape(cluster)[0]
leng = math.sqrt(leng)
cluster = cluster.reshape(leng,leng)
F1 = np.fft.fft2(cluster)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(cluster), cmap=py.cm.Greys)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
ax3.set_xlabel('Spatial Frequency')
ax3.set_ylabel('Power Spectrum')
plt.show()
#######################################
sys.exit()
| epl-1.0 |
CagataySonmez/EdgeCloudSim | scripts/sample_app5/ai_trainer/data_convertor.py | 1 | 5014 | import pandas as pd
import json
import sys
if len (sys.argv) != 5:
print('invalid arguments. Usage:')
print('python data_conventor.py config.json [edge|cloud_rsu|cloud_gsm] [classifier|regression] [train|test]')
sys.exit(1)
with open(sys.argv[1]) as json_data_file:
data = json.load(json_data_file)
target = sys.argv[2]
method = sys.argv[3]
datatype = sys.argv[4]
print("conversion started with args " + target + ", " + method + ", " + datatype)
sim_result_folder = data["sim_result_folder"]
num_iterations = data["num_iterations"]
train_data_ratio = data["train_data_ratio"]
min_vehicle = data["min_vehicle"]
max_vehicle = data["max_vehicle"]
vehicle_step_size = data["vehicle_step_size"]
def getDecisionColumnName(target):
    """Map an offload target name to the decision-column label used in the
    simulator's output files.

    Parameters
    ----------
    target : str
        One of 'edge', 'cloud_rsu' or 'cloud_gsm'.

    Returns
    -------
    str
        The matching decision label ('EDGE', 'CLOUD_VIA_RSU' or
        'CLOUD_VIA_GSM').

    Raises
    ------
    ValueError
        If *target* is not a known offload target.  (The original code had
        no fall-through branch and raised an opaque UnboundLocalError.)
    """
    _DECISION_COLUMNS = {
        "edge": "EDGE",
        "cloud_rsu": "CLOUD_VIA_RSU",
        "cloud_gsm": "CLOUD_VIA_GSM",
    }
    try:
        return _DECISION_COLUMNS[target]
    except KeyError:
        raise ValueError("unknown target: %r" % (target,))
def getClassifierColumns(target):
    """Return the attribute names used to train the success/fail classifier
    for the given offload target ('edge', 'cloud_rsu' or 'cloud_gsm').

    The last entry, 'Result', is the class label column.
    """
    if target == "edge":
        columns = ["NumOffloadedTask", "TaskLength", "WLANUploadDelay",
                   "WLANDownloadDelay", "AvgEdgeUtilization", "Result"]
    elif target == "cloud_rsu":
        columns = ["NumOffloadedTask", "WANUploadDelay",
                   "WANDownloadDelay", "Result"]
    elif target == "cloud_gsm":
        columns = ["NumOffloadedTask", "GSMUploadDelay",
                   "GSMDownloadDelay", "Result"]
    return columns
def getRegressionColumns(target):
    """Return the attribute names used to train the service-time regressor
    for the given offload target ('edge', 'cloud_rsu' or 'cloud_gsm').

    The last entry, 'ServiceTime', is the regression target column.
    """
    if target == "edge":
        columns = ["TaskLength", "AvgEdgeUtilization", "ServiceTime"]
    elif target == "cloud_rsu":
        columns = ["TaskLength", "WANUploadDelay",
                   "WANDownloadDelay", "ServiceTime"]
    elif target == "cloud_gsm":
        columns = ["TaskLength", "GSMUploadDelay",
                   "GSMDownloadDelay", "ServiceTime"]
    return columns
def znorm(column):
    """Z-score a pandas column: subtract its mean and divide by its sample
    standard deviation (pandas default, ddof=1)."""
    centered = column - column.mean()
    return centered / column.std()
data_set = []
testDataStartIndex = (train_data_ratio * num_iterations) / 100
for ite in range(num_iterations):
for vehicle in range(min_vehicle, max_vehicle+1, vehicle_step_size):
if (datatype == "train" and ite < testDataStartIndex) or (datatype == "test" and ite >= testDataStartIndex):
file_name = sim_result_folder + "/ite" + str(ite + 1) + "/" + str(vehicle) + "_learnerOutputFile.cvs"
df = [pd.read_csv(file_name, na_values = "?", comment='\t', sep=",")]
df[0]['VehicleCount'] = vehicle
#print(file_name)
data_set += df
data_set = pd.concat(data_set, ignore_index=True)
data_set = data_set[data_set['Decision'] == getDecisionColumnName(target)]
if method == "classifier":
targetColumns = getClassifierColumns(target)
else:
targetColumns= getRegressionColumns(target)
if datatype == "train":
print ("##############################################################")
print ("Stats for " + target + " - " + method)
print ("Please use relevant information from below table in java side:")
train_stats = data_set[targetColumns].describe()
train_stats = train_stats.transpose()
print(train_stats)
print ("##############################################################")
#print("balancing " + target + " for " + method)
#BALANCE DATA SET
if method == "classifier":
df0 = data_set[data_set['Result']=="fail"]
df1 = data_set[data_set['Result']=="success"]
#size = min(len(df0[df0['VehicleCount']==max_vehicle]), len(df1[df1['VehicleCount']==min_vehicle]))
size = len(df0[df0['VehicleCount']==max_vehicle]) // 2
df1 = df1.groupby('VehicleCount').apply(lambda x: x if len(x) < size else x.sample(size))
df0 = df0.groupby('VehicleCount').apply(lambda x: x if len(x) < size else x.sample(size))
data_set = pd.concat([df0, df1], ignore_index=True)
else:
data_set = data_set[data_set['Result'] == 'success']
#size = min(len(data_set[data_set['VehicleCount']==min_vehicle]), len(data_set[data_set['VehicleCount']==max_vehicle]))
size = len(data_set[data_set['VehicleCount']==max_vehicle]) // 3
data_set = data_set.groupby('VehicleCount').apply(lambda x: x if len(x.index) < size else x.sample(size))
#EXTRACT RELATED ATTRIBUTES
df = pd.DataFrame(columns=targetColumns)
for column in targetColumns:
if column == 'Result' or column == 'ServiceTime':
df[column] = data_set[column]
else:
df[column] = znorm(data_set[column])
f = open(sim_result_folder + "/" + target + "_" + method + "_" + datatype + ".arff", 'w')
f.write('@relation ' + target + '\n\n')
for column in targetColumns:
if column == 'Result':
f.write('@attribute class {fail,success}\n')
else:
f.write('@attribute ' + column + ' REAL\n')
f.write('\n@data\n')
df.to_csv(f, header=False, index=False)
f.close()
print ("##############################################################")
print ("Operation completed!")
print (".arff file is generated for weka.")
print ("##############################################################")
| gpl-3.0 |
jwi078/incubator-airflow | airflow/hooks/presto_hook.py | 1 | 2964 | from builtins import str
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
import logging
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
    """Raised when a Presto statement fails; wraps pyhive's DatabaseError
    with a readable message."""
    pass
class PrestoHook(DbApiHook):
    """
    Interact with Presto through PyHive!
    >>> ph = PrestoHook()
    >>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
    >>> ph.get_records(sql)
    [[340698]]
    """
    # Attribute names used by the DbApiHook base class to locate the
    # Airflow connection for this hook.
    conn_name_attr = 'presto_conn_id'
    default_conn_name = 'presto_default'
    def get_conn(self):
        """Returns a connection object"""
        db = self.get_connection(self.presto_conn_id)
        # Catalog defaults to 'hive' unless overridden in the connection's
        # extra JSON; host/port/login/schema come from the connection record.
        return presto.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            catalog=db.extra_dejson.get('catalog', 'hive'),
            schema=db.schema)
    @staticmethod
    def _strip_sql(sql):
        # Strip surrounding whitespace and any trailing semicolon,
        # presumably because the Presto driver rejects a trailing ';'.
        return sql.strip().rstrip(';')
    def get_records(self, hql, parameters=None):
        """
        Get a set of records from Presto
        """
        try:
            return super(PrestoHook, self).get_records(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            if (hasattr(e, 'message') and
                    'errorName' in e.message and
                    'message' in e.message):
                # Use the structured error data in the raised exception
                raise PrestoException('{name}: {message}'.format(
                    name=e.message['errorName'], message=e.message['message']))
            else:
                raise PrestoException(str(e))
    def get_first(self, hql, parameters=None):
        """
        Returns only the first row, regardless of how many rows the query
        returns.
        """
        try:
            return super(PrestoHook, self).get_first(
                self._strip_sql(hql), parameters)
        except DatabaseError as e:
            # HACK: the driver stringifies the server's error dict, and
            # eval() is used to recover it.  NOTE(review): eval() on an
            # error message is unsafe; ast.literal_eval would be safer.
            obj = eval(str(e))
            raise PrestoException(obj['message'])
    def get_pandas_df(self, hql, parameters=None):
        """
        Get a pandas dataframe from a sql query.
        """
        # Local import so the hook is usable without pandas installed.
        import pandas
        cursor = self.get_cursor()
        try:
            cursor.execute(self._strip_sql(hql), parameters)
            data = cursor.fetchall()
        except DatabaseError as e:
            # NOTE(review): same unsafe eval-of-error-text pattern as
            # get_first above.
            obj = eval(str(e))
            raise PrestoException(obj['message'])
        column_descriptions = cursor.description
        if data:
            df = pandas.DataFrame(data)
            # The first element of each DB-API description tuple is the
            # column name.
            df.columns = [c[0] for c in column_descriptions]
        else:
            df = pandas.DataFrame()
        return df
    def run(self, hql, parameters=None):
        """
        Execute the statement against Presto. Can be used to create views.
        """
        return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
    def insert_rows(self):
        # Bulk row insertion is not supported for Presto by this hook.
        raise NotImplementedError()
| apache-2.0 |
herberthudson/pynance | pynance/opt/price.py | 2 | 7070 | """
.. Copyright (c) 2014, 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Options - price (:mod:`pynance.opt.price`)
==================================================
.. currentmodule:: pynance.opt.price
"""
from __future__ import absolute_import
import pandas as pd
from ._common import _getprice
from ._common import _relevant_rows
from . import _constants
class Price(object):
    """
    Wrapper class for :class:`pandas.DataFrame` for retrieving
    options prices.
    Objects of this class are not intended for direct instantiation
    but are created as attributes of objects of type :class:`~pynance.opt.core.Options`.
    .. versionadded:: 0.3.0
    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        Options data.
    Attributes
    ----------
    data : :class:`pandas.DataFrame`
        Options data.
    Methods
    -------
    .. automethod:: exps
    .. automethod:: get
    .. automethod:: metrics
    .. automethod:: strikes
    """
    def __init__(self, df):
        # Raw options table; appears to carry a MultiIndex of
        # (Strike, Expiry, Type) given the keys passed to _relevant_rows
        # below -- confirm against the Options constructor.
        self.data = df
    def get(self, opttype, strike, expiry):
        """
        Price as midpoint between bid and ask.
        Parameters
        ----------
        opttype : str
            'call' or 'put'.
        strike : numeric
            Strike price.
        expiry : date-like
            Expiration date. Can be a :class:`datetime.datetime` or
            a string that :mod:`pandas` can interpret as such, e.g.
            '2015-01-01'.
        Returns
        -------
        out : float
        Examples
        --------
        >>> geopts = pn.opt.get('ge')
        >>> geopts.price.get('call', 26., '2015-09-18')
        0.94
        """
        _optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
                "No key for {} strike {} {}".format(expiry, strike, opttype))
        # Bid/ask midpoint, as computed by the shared _getprice() helper.
        return _getprice(_optrow)
    def metrics(self, opttype, strike, expiry):
        """
        Basic metrics for a specific option.
        Parameters
        ----------
        opttype : str ('call' or 'put')
        strike : numeric
            Strike price.
        expiry : date-like
            Expiration date. Can be a :class:`datetime.datetime` or
            a string that :mod:`pandas` can interpret as such, e.g.
            '2015-01-01'.
        Returns
        -------
        out : :class:`pandas.DataFrame`
        """
        _optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
                "No key for {} strike {} {}".format(expiry, strike, opttype))
        # Row labels of the single-column metrics frame returned to callers.
        _index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int', 'Underlying_Price', 'Quote_Time']
        _out = pd.DataFrame(index=_index, columns=['Value'])
        _out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
        # Copy the remaining raw fields straight out of the matching row.
        for _name in _index[2:]:
            _out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
        _eq_price = _out.loc['Underlying_Price', 'Value']
        # Row-level assignment: fills the lone 'Value' cell of 'Time_Val'.
        if opttype == 'put':
            _out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
        else:
            _out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
        return _out
    def strikes(self, opttype, expiry):
        """
        Retrieve option prices for all strikes of a given type with a given expiration.
        Parameters
        ----------
        opttype : str ('call' or 'put')
        expiry : date-like
            Expiration date. Can be a :class:`datetime.datetime` or
            a string that :mod:`pandas` can interpret as such, e.g.
            '2015-01-01'.
        Returns
        ----------
        df : :class:`pandas.DataFrame`
        eq : float
            Price of underlying.
        qt : datetime.datetime
            Time of quote.
        See Also
        --------
        :meth:`exps`
        """
        _relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
                "No key for {} {}".format(expiry, opttype))
        _index = _relevant.index.get_level_values('Strike')
        _columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
        _df = pd.DataFrame(index=_index, columns=_columns)
        _underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
        # NOTE(review): Timestamp.to_datetime() was deprecated/removed in
        # newer pandas; this line works only with the older pandas this
        # library targets -- confirm the pinned pandas version.
        _quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
        for _col in _columns[2:]:
            _df.loc[:, _col] = _relevant.loc[:, _col].values
        # Quoted price is the bid/ask midpoint.
        _df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
        _set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
        return _df, _underlying, _quotetime
    def exps(self, opttype, strike):
        """
        Prices for given strike on all available dates.
        Parameters
        ----------
        opttype : str ('call' or 'put')
        strike : numeric
        Returns
        ----------
        df : :class:`pandas.DataFrame`
        eq : float
            Price of underlying.
        qt : :class:`datetime.datetime`
            Time of quote.
        See Also
        --------
        :meth:`strikes`
        """
        _relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
                "No key for {} {}".format(strike, opttype))
        _index = _relevant.index.get_level_values('Expiry')
        _columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
        _df = pd.DataFrame(index=_index, columns=_columns)
        _eq = _relevant.loc[:, 'Underlying_Price'].values[0]
        # NOTE(review): same deprecated .to_datetime() call as in strikes().
        _qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
        for _col in _columns[2:]:
            _df.loc[:, _col] = _relevant.loc[:, _col].values
        # Quoted price is the bid/ask midpoint.
        _df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
        _set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
        return _df, _eq, _qt
def _set_tv_other_ix(df, opttype, pricecol, tvcol, eqprice, strike):
if opttype == 'put':
if strike <= eqprice:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = eqprice - strike
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
else:
if eqprice <= strike:
df.loc[:, tvcol] = df.loc[:, pricecol]
else:
_diff = strike - eqprice
df.loc[:, tvcol] = df.loc[:, pricecol] + _diff
def _set_tv_strike_ix(df, opttype, pricecol, tvcol, eqprice):
df.loc[:, tvcol] = df.loc[:, pricecol]
if opttype == 'put':
_mask = (df.index > eqprice)
df.loc[_mask, tvcol] += eqprice - df.index[_mask]
else:
_mask = (df.index < eqprice)
df.loc[_mask, tvcol] += df.index[_mask] - eqprice
return
def _get_put_time_val(putprice, strike, eqprice):
    """Time value of a put: price minus intrinsic value, rounded to the
    library's significant digits, when in the money; otherwise the raw
    put price."""
    if strike > eqprice:
        return round(putprice + eqprice - strike, _constants.NDIGITS_SIG)
    return putprice
def _get_call_time_val(callprice, strike, eqprice):
    """Time value of a call: price minus intrinsic value, rounded to the
    library's significant digits, when in the money; otherwise the raw
    call price."""
    if eqprice > strike:
        return round(callprice + strike - eqprice, _constants.NDIGITS_SIG)
    return callprice
| mit |
molpopgen/pylibseq | docs/conf.py | 2 | 9974 | # -*- coding: utf-8 -*-
#
# pylibseq documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 19 19:11:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
import shlex
#os.environ['LD_LIBRARY_PATH']=sys.prefix+'/lib'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Local builds import the package from the repository checkout; Read the
# Docs builds import the installed copy from site-packages instead.
# (Fixed: the original compared a boolean expression with `is False`,
# an identity check against a bool singleton discouraged by PEP 8.)
if os.environ.get('READTHEDOCS') != "True":
    sys.path.insert(0, os.path.abspath('..'))
else:
    import site
    p = site.getsitepackages()[0]
    sys.path.insert(0, p)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pylibseq'
copyright = u'2015, Kevin Thornton'
author = u'Kevin Thornton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.3'
# The full version, including alpha/beta/rc tags.
release = '0.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
if (os.environ.get('READTHEDOCS')=="True") is True:
html_theme_options = {
'github_user':'molpopgen',
'github_repo':'pylibseq',
# 'github_button':True,
# 'github_banner':True,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pylibseqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pylibseq.tex', u'pylibseq Documentation',
u'Kevin Thornton', 'manual'),
]
autoclass_content = 'both'
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pylibseq', u'pylibseq Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pylibseq', u'pylibseq Documentation',
author, 'pylibseq', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
kaiseu/pat-data-processing | component/cpu.py | 1 | 2039 | #!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: kai.a.xu@intel.com
@file: cpu.py
@time: 8/15/2017 10:50
@desc:
"""
import numpy as np
import pandas as pd
from component import base
class Cpu(base.CommonBase):
    """
    Node CPU attribute, phasing cpu data from original PAT file
    """
    # Columns read from the PAT cpu file; the first two identify the sample,
    # the remaining six are CPU utilisation percentages.
    used_col = ['HostName', 'TimeStamp', '%user', '%nice', '%system', '%iowait', '%steal', '%idle']
    # Parse the numeric columns as float32 to keep memory usage low.
    converter = {col: np.float32 for col in used_col[2:]}

    def __init__(self, file_path):
        # Path of the PAT cpu file for a single node.
        # (A dead no-argument __init__ preceding this one has been removed:
        # Python kept only the later definition, so it was never callable.)
        self.file_path = file_path

    def get_data_by_time(self, start, end):
        """
        get average value of this attribute and all raw data within the start and end timestamp.
        if start and end all equal to [0] will calculate all the data.
        :param start: list of start timestamp
        :param end: list of end timestamp, should be the same length of start
        :return: tuple (avg, df) - list of per-window column means and the full raw DataFrame
        """
        df = pd.read_csv(self.file_path, delim_whitespace=True,
                         usecols=self.used_col, header=0)
        # NOTE: a previous `pd.to_datetime(df['TimeStamp'], unit='s')` call
        # discarded its result (no-op) and has been removed; the index below
        # intentionally stays in raw epoch-second values so that the
        # `start`/`end` slicing works on the same scale.
        df = df.set_index('TimeStamp').astype(self.converter)
        avg = []
        if start[0] == end[0] == 0:  # calc all the data
            avg.append(df.loc[:, self.used_col[2:]].mean())
            if len(start) == 1:
                return avg, df
            else:
                for i in range(1, len(start)):  # calc the data within the pair of time period
                    avg.append(df.loc[start[i]:end[i], self.used_col[2:]].mean(axis=0))
                return avg, df
        for i in range(len(start)):  # calc the data within the pair of time period
            avg.append(df.loc[start[i]:end[i], self.used_col[2:]].mean(axis=0))
        return avg, df

    def used_col_num(self):
        # BUG FIX: the original returned len(self.__used_col); the double
        # underscore name-mangles to self._Cpu__used_col, which never exists,
        # so the method always raised AttributeError. The attribute is used_col.
        return len(self.used_col)
| apache-2.0 |
mrshu/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 4 | 5464 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
# Works on both Python 2 (prints the parenthesised expression identically for
# a single argument) and Python 3 (function call); the bare `print __doc__`
# statement is a SyntaxError under Python 3.
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: Simplified BSD
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.
    Parameters
    ----------
    l_x : int
        linear size of image array
    n_dir : int
        number of angles at which projections are acquired.
    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
        Row = one detector bin of one projection angle; column = one image
        pixel; entries are linear-interpolation weights.
    """
    # Pixel-centre coordinates, centred on the image middle.
    X, Y = _generate_center_coordinates(l_x)
    # Acquisition angles, evenly spaced over [0, pi).
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Flat pixel indices, duplicated because _weights yields two
    # (bin, weight) pairs per pixel (lower and upper neighbour).
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Project pixel centres onto the detector axis for this angle.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        # Split each pixel's contribution between its two nearest bins.
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Keep only contributions that land inside the detector range.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    # COO construction sums duplicate (row, col) entries, which is exactly
    # the weight accumulation required here.
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data():
    """ Synthetic binary data: thin boundaries of blurred random blobs.

    Uses the module-level image size ``l``. Returns a boolean (l, l) array
    that is True only on object boundaries (region minus its erosion).
    """
    rs = np.random.RandomState(0)
    # FIX: keep the count an int (np.random.rand rejects float dims on
    # modern NumPy) and divide by a float below to preserve the original
    # true-division behaviour for sigma.
    n_pts = 36
    x, y = np.ogrid[0:l, 0:l]
    # Circular region of interest centred in the image.
    mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
    mask = np.zeros((l, l))
    points = l * rs.rand(2, n_pts)
    # FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l / float(n_pts))
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # FIX: boolean subtraction was removed from NumPy; since the erosion is
    # a subset of res, XOR is the exact equivalent (upstream fix).
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128  # linear image size in pixels
# NOTE(review): l / 7. is a float; np.linspace inside
# build_projection_operator requires an integer sample count on modern
# NumPy -- confirm and wrap in int() when upgrading.
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
# Forward-project the image and add Gaussian measurement noise.
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
# Show the original image and the two reconstructions side by side.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
| bsd-3-clause |
aborovin/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d"  # date format used inside data file names
dateTimeFormat = "%Y%m%d %H:%M:%S"  # timestamp format used inside csv files


def fileName2date(fName):
    """Extract the trading date from a file name like 'SPY_20120831.csv'."""
    stem = os.path.splitext(fName)[0]
    datePart = stem.split('_')[1]
    return dt.datetime.strptime(datePart, dateFormat).date()


def parseDateTime(dateTimeStr):
    """Parse a 'YYYYMMDD HH:MM:SS' string into a datetime object."""
    return dt.datetime.strptime(dateTimeStr, dateTimeFormat)
def loadCsv(fName):
    """Load a DataFrame from a csv file (manual parser, timestamp index)."""
    with open(fName, 'r') as fh:
        rows = fh.readlines()

    # First cell of the header row is the timestamp column; the rest are
    # the data column names.
    colNames = [c.strip() for c in rows[0].strip().split(',')[1:]]
    idx = []
    columns = [[] for _ in range(len(colNames))]
    for row in rows[1:]:
        cells = row.rstrip().split(',')
        idx.append(parseDateTime(cells[0]))
        for j, cell in enumerate(cells[1:]):
            columns[j].append(float(cell))

    return pd.DataFrame(data=dict(zip(colNames, columns)), index=pd.Index(idx))
class HistDataCsv(object):
    '''class for working with historic database in .csv format

    One instance manages the per-day csv files of a single symbol, stored
    in <dbDir>/<symbol>/<symbol>_YYYYMMDD.csv.
    NOTE(review): written for Python 2 (print statements) and a legacy
    pandas API (DataFrame.from_csv, set_value) -- both removed in modern
    versions; confirm the runtime environment before reuse.
    '''
    def __init__(self,symbol,dbDir,autoCreateDir=False):
        # symbol: ticker string; dbDir: root data directory;
        # autoCreateDir: create the symbol subdirectory if missing.
        self.symbol = symbol
        self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
        if not os.path.exists(self.dbDir) and autoCreateDir:
            print 'Creating data directory ', self.dbDir
            os.mkdir(self.dbDir)
        # Dates available in the database, derived from the file names.
        self.dates = []
        for fName in os.listdir(self.dbDir):
            self.dates.append(fileName2date(fName))
    def saveData(self,date, df,lowerCaseColumns=True):
        ''' add data to database

        Writes df to <symbol>_<date>.csv inside this symbol's directory.
        '''
        if lowerCaseColumns: # this should provide consistency to column names. All lowercase
            df.columns = [ c.lower() for c in df.columns]
        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        dest = os.path.join(self.dbDir,s) # full path destination
        print 'Saving data to: ', dest
        df.to_csv(dest)
    def loadDate(self,date):
        ''' load data for a single date, returning a DataFrame '''
        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        # NOTE(review): pd.DataFrame.from_csv was removed in pandas 1.0;
        # pd.read_csv(..., index_col=0, parse_dates=True) is the successor.
        df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
        # Strip stray whitespace from column names.
        cols = [col.strip() for col in df.columns.tolist()]
        df.columns = cols
        #df = loadCsv(os.path.join(self.dbDir,s))
        return df
    def loadDates(self,dates):
        ''' load multiple dates, concantenating to one DataFrame '''
        tmp =[]
        print 'Loading multiple dates for ' , self.symbol
        p = ProgressBar(len(dates))
        for i,date in enumerate(dates):
            tmp.append(self.loadDate(date))
            p.animate(i+1)
        print ''
        return pd.concat(tmp)
    def createOHLC(self):
        ''' create ohlc from intraday data

        Builds one open/high/low/close row per available date; high/low
        come from the 'wap' column, open/close from 'open'/'close'.
        Days that fail to convert are reported and skipped.
        '''
        ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
        for date in self.dates:
            print 'Processing', date
            try:
                df = self.loadDate(date)
                ohlc.set_value(date,'open',df['open'][0])
                ohlc.set_value(date,'high',df['wap'].max())
                ohlc.set_value(date,'low', df['wap'].min())
                ohlc.set_value(date,'close',df['close'][-1])
            except Exception as e:
                print 'Could not convert:', e
        return ohlc
    def __repr__(self):
        return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
    ''' class working with multiple symbols at once

    Discovers one HistDataCsv handler per subdirectory of dataDir.
    NOTE(review): Python 2 only (iteritems/itervalues, print statements)
    and uses pd.WidePanel, which was removed from pandas -- confirm the
    runtime environment before reuse.
    '''
    def __init__(self, dataDir):
        # get symbols from directory names
        symbols = []
        for l in os.listdir(dataDir):
            if os.path.isdir(os.path.join(dataDir,l)):
                symbols.append(l)
        #build dataset
        self.csv = {} # dict of HistDataCsv handlers, keyed by symbol
        for symbol in symbols:
            self.csv[symbol] = HistDataCsv(symbol,dataDir)
    def loadDates(self,dates=None):
        '''
        get data for all symbols as wide panel
        provide a dates list. If no dates list is provided, common dates are used.
        '''
        if dates is None: dates=self.commonDates
        tmp = {}
        for k,v in self.csv.iteritems():
            tmp[k] = v.loadDates(dates)
        return pd.WidePanel(tmp)
    def toHDF(self,dataFile,dates=None):
        ''' write wide panel data to a hdfstore file '''
        if dates is None: dates=self.commonDates
        store = pd.HDFStore(dataFile)
        wp = self.loadDates(dates)
        store['data'] = wp
        store.close()
    @property
    def commonDates(self):
        ''' return dates common for all symbols '''
        t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
        # Intersect the first symbol's dates with all the others.
        d = list(set(t[0]).intersection(*t[1:]))
        return sorted(d)
    def __repr__(self):
        s = '-----Hist CSV Database-----\n'
        for k,v in self.csv.iteritems():
            s+= (str(v)+'\n')
        return s
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail() | bsd-3-clause |
mbonsma/phageParser | populate.py | 3 | 6935 | #!/usr/bin/env python
import argparse
import os
import pickle
import pandas
import requests
from Bio import Entrez, SeqIO
from lxml import html, etree
from tqdm import tqdm
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
import django
django.setup()
from util.acc import read_accession_file
from util.prunedict import prune_dict
from util import fetch
from restapi.models import (
Organism,
Spacer,
Repeat,
LocusSpacerRepeat,
AntiCRISPR,
Locus
)
# Absolute path to the "data" directory that ships next to this script.
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def populate_organism():
    """Populate the Organism table from the bundled accession list.

    Reads accessions from data/bac_accession_list.txt, resolves each to an
    organism name via NCBI Entrez, and creates one Organism row per
    (name, accession) pair.
    """
    def add_organism(name, accession):
        # get the object, this also checks for duplicates
        o, created = Organism.objects.get_or_create(
            name=name, accession=accession)
        return o

    def merge_acc_names(accession_list):
        """Return {accession: organism name} resolved via NCBI Entrez."""
        acc_name_dict = {}
        db = "nuccore"
        # Doing batches of 200 to make sure requests to NCBI are not too big
        for i in range(0, len(accession_list), 200):
            j = i + 200
            result_handle = Entrez.efetch(
                db=db, rettype="gb", id=accession_list[i:j])
            # Populate result per organism name
            records = SeqIO.parse(result_handle, 'genbank')
            for record in tqdm(records):
                # Using NCBI name, which should match accession number passed
                acc_name_dict[record.name] = record.annotations['organism']
        return acc_name_dict

    with open(os.path.join(DATA_DIR, 'bac_accession_list.txt')) as f:
        accession_list = list(read_accession_file(f))
    # BUG FIX: the previous code bound the raw accession *list* to
    # acc_name_dict and then indexed it with each element
    # (acc_name_dict[acc]), which raises TypeError. Resolve accessions to
    # names through merge_acc_names() as originally intended.
    acc_name_dict = merge_acc_names(accession_list)
    for acc in acc_name_dict:
        add_organism(name=acc_name_dict[acc], accession=acc)
def get_spacer_repeat_files():
    """Download (if needed) the CRISPRdb spacer and repeat FASTA databases.

    Returns the local file paths as (spacer_path, repeat_path).
    """
    spacer_path = os.path.join(DATA_DIR, "spacerdatabase.txt")
    spacer_url = ('http://crispr.i2bc.paris-saclay.fr/'
                  'crispr/BLAST/Spacer/Spacerdatabase')
    repeat_path = os.path.join(DATA_DIR, "repeatdatabase.txt")
    repeat_url = 'http://crispr.i2bc.paris-saclay.fr/crispr/BLAST/DR/DRdatabase'
    fetch.fetch(spacer_path, spacer_url)
    fetch.fetch(repeat_path, repeat_url)
    return spacer_path, repeat_path
def repeatfiletodict(rfile):
    """Build {accession: {'RepeatSeq': sequence}} from a repeat FASTA file.

    Each FASTA record name is a '|'-separated list of accessions that all
    share the record's repeat sequence.
    """
    result = {}
    for record in SeqIO.parse(rfile, 'fasta'):
        seq = str(record.seq)
        for accession in record.name.split('|'):
            result[accession] = {'RepeatSeq': seq}
    return result
def addspacerstodict(gendict, sfile):
    """Attach spacer sequences from a FASTA file to gendict entries.

    Record names are '|'-separated accessions of the form
    '<locus-accession>_<order>'; each spacer is stored under
    gendict[locus]['Spacers'][order]. Unknown loci are reported and skipped.
    """
    for record in SeqIO.parse(sfile, 'fasta'):
        seq = str(record.seq)
        for acc in record.name.split('|'):
            parts = acc.split('_')
            order = parts[-1]
            acc_id = '_'.join(parts[:-1])
            try:
                locus = gendict[acc_id]
                locus.setdefault('Spacers', {})[order] = seq
            except KeyError:
                print('Error on accession id: %s' % acc_id)
    return gendict
def addpositionstodict(gendict):
    """Scrape locus start/stop positions from CRISPRdb into gendict.

    For every locus id (accession + locus index) that does not yet have a
    'Start' key, fetches the CRISPRdb page of its accession and copies the
    Start/Stop columns of the locus table into the matching entries.
    """
    print("Downloading position information from web...")
    for accidwithloc in tqdm(gendict):
        if 'Start' in gendict[accidwithloc]:
            continue
        accid = '_'.join(accidwithloc.split('_')[:-1])
        url = ('http://crispr.i2bc.paris-saclay.fr/crispr/crispr_db.php?'
               'checked%5B%5D={}'.format(accid))
        page = requests.get(url)
        htmltable = html.fromstring(page.content).xpath(
            "//table[normalize-space(@class)='primary_table']")[1]
        strtable = etree.tostring(htmltable)
        # converts to pandas df and then to numpy array then drop titles
        # FIX: DataFrame.as_matrix() was removed in pandas 1.0; `.values`
        # is the equivalent, available in both old and new pandas.
        arrtable = pandas.read_html(strtable)[0].values[2:]
        for row in arrtable:
            if row[0] in gendict:
                gendict[row[0]]['Start'] = row[2]
                gendict[row[0]]['Stop'] = row[3]
            else:
                # Questionable loci are expected to be absent locally.
                if row[1] != 'questionable':
                    print("Can't find %s in local files" % row[0])
    return gendict
def populate_fromlocus(locid, locdict):
    """Store one CRISPR locus: its repeat, ordered spacers and LSR links.

    ``locid`` is an accession id with a trailing locus index (e.g.
    'NC_000001_2'); ``locdict`` carries 'RepeatSeq', 'Start', 'Stop' and an
    order->sequence mapping under 'Spacers'. Skips loci whose organism is
    not present in the database.
    """
    accid = '_'.join(locid.split('_')[:-1])
    matches = Organism.objects.filter(accession=accid)
    if not matches.exists():
        print('Organism with accid %s not found in db' % accid)
        return
    organism = matches[0]
    repeat, _ = Repeat.objects.get_or_create(sequence=locdict['RepeatSeq'])
    locus, _ = Locus.objects.get_or_create(
        organism=organism,
        genomic_start=int(locdict['Start']),
        genomic_end=int(locdict['Stop'])
    )
    spacers = locdict['Spacers']
    for order_key in sorted(spacers):
        spacer, _ = Spacer.objects.get_or_create(sequence=spacers[order_key])
        lsr, _ = LocusSpacerRepeat.objects.get_or_create(
            locus=locus,
            spacer=spacer,
            repeat=repeat,
            order=int(order_key)
        )
        spacer.save()
        lsr.save()
    locus.save()
    repeat.save()
    organism.save()
def populate_lsrpair():
    """Download CRISPRdb data and populate the Spacer/Repeat/Locus tables.

    Builds the locus dictionary (repeats + spacers + genomic positions),
    pickles it as a backup, then inserts every locus into the database.
    """
    print('Downloading files and gathering online data.')
    sfile, rfile = get_spacer_repeat_files()
    gendict = prune_dict(
        addpositionstodict(
            addspacerstodict(
                repeatfiletodict(rfile), sfile)))
    # BUG FIX: the backup file must be opened for *writing* ('wb');
    # pickle.dump() on a file opened with 'rb' raises
    # io.UnsupportedOperation and the backup was never written.
    with open('dbbackups/genedict.pickle', 'wb') as f:
        pickle.dump(gendict, f, protocol=pickle.HIGHEST_PROTOCOL)
    print('Created dictionary and dumped data to genedict.pickle')
    print("Populating Spacer, Repeat, SpacerRepeatPair, "
          "OrganismSpacerRepeatPair tables")
    for locid in tqdm(gendict):
        populate_fromlocus(locid, gendict[locid])
def populate_anticrispr():
    """Fetch antiCRISPR protein sequences from NCBI and store them.

    Reads the accession list bundled under data/, downloads the FASTA
    records in one Entrez request, and creates one AntiCRISPR row per
    record.
    """
    with open(os.path.join(DATA_DIR, 'antiCRISPR_accessions.txt')) as f:
        accession_list = list(read_accession_file(f))
    print("Fetching AntiCRISPR entries")
    result_handle = Entrez.efetch(
        db='protein', rettype="fasta", id=accession_list)
    for record in tqdm(SeqIO.parse(result_handle, 'fasta')):
        entry, _ = AntiCRISPR.objects.get_or_create(
            accession=record.name,
            sequence=str(record.seq))
        entry.save()
def main():
    """CLI entry point: parse the email argument and run all populators.

    NCBI requires a contact email on Entrez requests; it is taken from the
    single positional command-line argument.
    """
    parser = argparse.ArgumentParser(
        description='Populate the phageParser database with data from NCBI'
    )
    parser.add_argument(
        'email',
        nargs=1,
        help=('your email address (does not need to be registered, '
              'just used to identify you)')
    )
    args = parser.parse_args()
    # BUG FIX: nargs=1 yields a one-element *list*; Entrez.email must be a
    # plain string, so unpack the single value.
    Entrez.email = args.email[0]
    print("Starting organism population")
    populate_organism()
    print("Starting LSR population")
    populate_lsrpair()
    print("Starting AntiCRISPR population")
    populate_anticrispr()
# Run the full population pipeline when executed as a script.
if __name__ == '__main__':
    main()
| mit |
massmutual/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.
    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.
    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 30
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached
    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        of the rotation matrix
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    Raises
    ------
    LinAlgError
        If the SVD never converges within max_svd_restarts attempts.
    References
    ----------
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    Notes
    -----
    The eigenvector embedding is used to iteratively search for the
    closest discrete partition. First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated. Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated. These two
    calculations are performed until convergence. The discrete partition
    matrix is returned as the clustering solution. Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element. This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors. Samples should lie on the unit
    # hypersphere centered at the origin. This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: count the restart when the SVD *fails*, not when
                # it succeeds. The original incremented svd_restarts on the
                # success path, so a persistently failing SVD restarted the
                # outer loop forever (cf. upstream scikit-learn, which
                # increments here in the except branch).
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples via a spectral embedding of an affinity matrix.

    The symmetric affinity matrix is embedded using eigenvectors of the
    normalized graph Laplacian, and the embedded points are then grouped
    either by k-means or by iterative discretization. When ``affinity`` is
    the adjacency matrix of a graph, this finds normalized graph cuts,
    which works well for highly non-convex clusters (e.g. nested circles).

    Parameters
    ----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        Symmetric matrix describing sample similarity: a graph adjacency
        matrix, a heat kernel of pairwise distances, or a symmetric
        k-nearest-neighbours connectivity matrix.
    n_clusters : integer, optional
        Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
        Number of eigenvectors used for the spectral embedding.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        Eigenvalue decomposition strategy; 'amg' requires pyamg and can be
        faster on very large sparse problems but may be unstable.
    random_state : int seed, RandomState instance, or None (default)
        Pseudo random number generator used by the 'amg' solver and by the
        k-means initialization.
    n_init : int, optional, default: 10
        Number of k-means runs; the best output (by inertia) is kept.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for the arpack eigendecomposition.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Label assignment strategy in the embedding space; discretization is
        less sensitive to random initialization than k-means.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    Notes
    -----
    The graph should contain a single connected component; the result is a
    normalized spectral clustering (it solves the normalized cut for k=2).
    See Shi & Malik (2000), von Luxburg (2007) and Yu & Shi (2003).
    """
    # Reject unknown strategies before doing any expensive work.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters

    # Embed with the leading eigenvectors of the normalized Laplacian,
    # keeping the first (constant) eigenvector as well.
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol, drop_first=False)

    if assign_labels == 'kmeans':
        _, labels, _ = k_means(embedding, n_clusters,
                               random_state=random_state, n_init=n_init)
    else:
        labels = discretize(embedding, random_state=random_state)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.
    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.
    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::
            np.exp(-gamma * d(X,X) ** 2)
    or a k-nearest neighbors connectivity matrix.
    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.
    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.
        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.
    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and
        sigmoid affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.
    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.
    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.
    labels_ :
        Labels of each point
    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::
        np.exp(- X ** 2 / (2. * delta ** 2))
    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.
    If the pyamg package is installed, it is used: this greatly
    speeds up computation.
    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """
    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None):
        # Parameters are stored unvalidated, per scikit-learn convention;
        # all validation happens in ``fit``.
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        # A square X is ambiguous: the caller may have passed a precomputed
        # affinity matrix under the old API, so warn unless they said so.
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
        if self.affinity == 'nearest_neighbors':
            # Symmetrize the (directed) kNN connectivity graph so the
            # affinity matrix is symmetric, as spectral clustering requires.
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            # X itself is the affinity matrix; use it as-is.
            self.affinity_matrix_ = X
        else:
            # Kernel affinity: gamma/degree/coef0 are only forwarded for
            # string kernels; a callable receives kernel_params instead.
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)
        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self
    @property
    def _pairwise(self):
        # Tells scikit-learn utilities (e.g. cross-validation) that when
        # affinity is 'precomputed', X must be indexed on both axes.
        return self.affinity == "precomputed"
| bsd-3-clause |
ortylp/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """Return Scott's-Rule bandwidth for *obj*, scaled by ``fac``.

    ``obj`` is a ``gaussian_kde`` instance exposing ``n`` (number of data
    points) and ``d`` (number of dimensions).
    """
    scott_factor = obj.n ** (-1. / (obj.d + 4))
    return scott_factor * fac
# Two-component Gaussian mixture sample: a broad mode at -2 (175 points)
# and a narrow mode at +2 (50 points).
# NOTE(review): np.random is not seeded, so the figure varies run to run.
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])
# Evaluation grid extends one unit beyond the sample range on each side.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
# Four KDEs over the same sample, differing only in bandwidth selection.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
# True mixture density, weighting each component by its share of the sample.
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
# Rug plot of the raw sample along the x axis.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
huzq/scikit-learn | examples/neighbors/plot_nca_dim_reduction.py | 24 | 3839 | """
==============================================================
Dimensionality Reduction with Neighborhood Components Analysis
==============================================================
Sample usage of Neighborhood Components Analysis for dimensionality reduction.
This example compares different (linear) dimensionality reduction methods
applied on the Digits data set. The data set contains images of digits from
0 to 9 with approximately 180 samples of each class. Each image is of
dimension 8x8 = 64, and is reduced to a two-dimensional data point.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
Neighborhood Components Analysis (NCA) tries to find a feature space such
that a stochastic nearest neighbor algorithm will give the best accuracy.
Like LDA, it is a supervised method.
One can see that NCA enforces a clustering of the data that is visually
meaningful despite the large reduction in dimension.
"""
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import (KNeighborsClassifier,
NeighborhoodComponentsAnalysis)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
print(__doc__)
# Number of neighbors for the downstream KNN evaluation classifier.
n_neighbors = 3
random_state = 0
# Load Digits dataset
X, y = datasets.load_digits(return_X_y=True)
# Split into train/test (stratified so class proportions are preserved)
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.5, stratify=y,
                     random_state=random_state)
# NOTE(review): ``dim`` (and the loop index ``i`` below) are unused.
dim = len(X[0])
n_classes = len(np.unique(y))
# Reduce dimension to 2 with PCA
pca = make_pipeline(StandardScaler(),
                    PCA(n_components=2, random_state=random_state))
# Reduce dimension to 2 with LinearDiscriminantAnalysis
lda = make_pipeline(StandardScaler(),
                    LinearDiscriminantAnalysis(n_components=2))
# Reduce dimension to 2 with NeighborhoodComponentAnalysis
nca = make_pipeline(StandardScaler(),
                    NeighborhoodComponentsAnalysis(n_components=2,
                                                   random_state=random_state))
# Use a nearest neighbor classifier to evaluate the methods
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
# Make a list of the methods to be compared
dim_reduction_methods = [('PCA', pca), ('LDA', lda), ('NCA', nca)]
# One figure per method: embed, evaluate with KNN, and scatter-plot.
for i, (name, model) in enumerate(dim_reduction_methods):
    plt.figure()
    # Fit the method's model
    model.fit(X_train, y_train)
    # Fit a nearest neighbor classifier on the embedded training set
    knn.fit(model.transform(X_train), y_train)
    # Compute the nearest neighbor accuracy on the embedded test set
    acc_knn = knn.score(model.transform(X_test), y_test)
    # Embed the data set in 2 dimensions using the fitted model
    X_embedded = model.transform(X)
    # Plot the projected points and show the evaluation score
    plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, s=30, cmap='Set1')
    plt.title("{}, KNN (k={})\nTest accuracy = {:.2f}".format(name,
                                                              n_neighbors,
                                                              acc_knn))
plt.show()
| bsd-3-clause |
tcmoore3/mdtraj | mdtraj/tests/test_topology.py | 5 | 8412 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import tempfile
import mdtraj as md
import numpy as np
from mdtraj.utils.six.moves import cPickle
from mdtraj.utils import import_
from mdtraj.testing import get_fn, eq, skipif, assert_raises
# Optional dependencies: tests requiring OpenMM or pandas are skipped
# (via the @skipif decorators below) when the import fails.
try:
    from simtk.openmm import app
    HAVE_OPENMM = True
except ImportError:
    HAVE_OPENMM = False
try:
    import pandas as pd
    HAVE_PANDAS = True
except ImportError:
    HAVE_PANDAS = False
@skipif(not HAVE_OPENMM)
def test_topology_openmm():
    """Round-trip an MDTraj Topology through OpenMM and back unchanged."""
    topology = md.load(get_fn('1bpi.pdb')).topology
    # the openmm trajectory doesn't have the distinction
    # between resSeq and index, so if they're out of whack
    # in the openmm version, that cant be preserved
    for residue in topology.residues:
        residue.resSeq = residue.index
    mm = topology.to_openmm()
    assert isinstance(mm, app.Topology)
    topology2 = md.Topology.from_openmm(mm)
    eq(topology, topology2)
@skipif(not HAVE_OPENMM)
def test_topology_openmm_boxes():
    """Smoke test: exporting unit-cell info to OpenMM does not raise."""
    u = import_('simtk.unit')
    traj = md.load(get_fn('1vii_sustiva_water.pdb'))
    mmtop = traj.topology.to_openmm(traj=traj)
    # The value is intentionally unused: the call chain itself is the test.
    box = mmtop.getUnitCellDimensions() / u.nanometer
@skipif(not HAVE_PANDAS)
def test_topology_pandas():
    """Round-trip a Topology through its pandas DataFrame representation."""
    topology = md.load(get_fn('native.pdb')).topology
    atoms, bonds = topology.to_dataframe()
    topology2 = md.Topology.from_dataframe(atoms, bonds)
    eq(topology, topology2)
    topology3 = md.Topology.from_dataframe(atoms)  # Make sure the default bonds argument of None works, see issue #774
@skipif(not HAVE_PANDAS)
def test_topology_pandas_TIP4PEW():
    """DataFrame round-trip for a system with virtual sites (TIP4P-EW water)."""
    topology = md.load(get_fn('GG-tip4pew.pdb')).topology
    atoms, bonds = topology.to_dataframe()
    topology2 = md.Topology.from_dataframe(atoms, bonds)
    eq(topology, topology2)
def test_topology_numbers():
    """n_atoms/n_residues match iteration, and atom(i).index == i."""
    top = md.load(get_fn('1bpi.pdb')).topology
    assert sum(1 for _ in top.atoms) == top.n_atoms
    assert sum(1 for _ in top.residues) == top.n_residues
    for idx in range(top.n_atoms):
        assert top.atom(idx).index == idx
@skipif(not HAVE_PANDAS)
def test_topology_unique_elements_bpti():
    """BPTI should contain exactly the elements C, O, N, H and S."""
    traj = md.load(get_fn('bpti.pdb'))
    top, bonds = traj.top.to_dataframe()
    atoms = np.unique(["C", "O", "N", "H", "S"])
    eq(atoms, np.unique(top.element.values))
def test_chain():
    """Chain residue/atom counts agree with iteration, and atom(i) indexes in order."""
    first_chain = md.load(get_fn('bpti.pdb')).topology.chain(0)
    assert first_chain.n_residues == len(list(first_chain.residues))
    chain_atoms = list(first_chain.atoms)
    assert first_chain.n_atoms == len(chain_atoms)
    for idx, atom in enumerate(chain_atoms):
        assert atom == first_chain.atom(idx)
def test_residue():
    """Residue atom count matches iteration, and atom(i) indexes in order."""
    first_res = md.load(get_fn('bpti.pdb')).topology.residue(0)
    res_atoms = list(first_res.atoms)
    assert first_res.n_atoms == len(res_atoms)
    for idx, atom in enumerate(res_atoms):
        assert first_res.atom(idx) == atom
def test_segment_id():
    """Segment IDs should survive loading and the DataFrame export."""
    top = md.load(get_fn('ala_ala_ala.pdb')).topology
    assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
    df = top.to_dataframe()[0]
    # Bug fix: the original wrote len(df["segmentID"] == "AAL") == len(df),
    # but len() of a boolean mask is always len(df), so the assertion could
    # never fail. Count the rows that actually match instead.
    assert (df["segmentID"] == "AAL").sum() == len(df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsective_resSeq():
    """Non-consecutive resSeq values must round-trip via DataFrame and PDB.

    NOTE(review): nose-style generator test (each ``yield`` is one check);
    the function name is missing letters of 'nonconsecutive', but renaming
    would change test discovery, so it is left as-is.
    """
    t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
    yield lambda : eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
    df1 = t.top.to_dataframe()
    df2 = md.Topology.from_dataframe(*df1).to_dataframe()
    yield lambda : eq(df1[0], df2[0])
    # round-trip through a PDB load/save loop
    fd, fname = tempfile.mkstemp(suffix='.pdb')
    os.close(fd)
    t.save(fname)
    t2 = md.load(fname)
    yield lambda : eq(df1[0], t2.top.to_dataframe()[0])
    os.unlink(fname)
def test_pickle():
    """A Topology must survive a pickle dump/load cycle without raising."""
    # test pickling of topology (bug #391)
    cPickle.loads(cPickle.dumps(md.load(get_fn('bpti.pdb')).topology))
def test_atoms_by_name():
    """atoms_by_name filters correctly at topology, chain and residue level."""
    top = md.load(get_fn('bpti.pdb')).topology
    atoms = list(top.atoms)
    # Topology-wide and chain-0 iteration should yield the same CA atoms
    # (BPTI has a single chain).
    for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
        assert atom1 == atom2
        assert atom1 in atoms
        assert atom1.name == 'CA'
    assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
    assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
    # An unknown atom name must raise KeyError, not return None.
    assert_raises(KeyError, lambda: top.residue(15).atom('sdfsdsdf'))
def test_select_atom_indices():
    """Named atom-selection keywords return the expected indices (nose generator test)."""
    top = md.load(get_fn('native.pdb')).topology
    yield lambda: eq(top.select_atom_indices('alpha'), np.array([8]))
    yield lambda: eq(top.select_atom_indices('minimal'),
                     np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
    # An unsupported keyword must raise ValueError.
    assert_raises(ValueError, lambda: top.select_atom_indices('sdfsdfsdf'))
@skipif(not HAVE_OPENMM)
def test_top_dataframe_openmm_roundtrip():
    """Smoke test: DataFrame round-trip followed by OpenMM export does not raise."""
    t = md.load(get_fn('2EQQ.pdb'))
    top, bonds = t.top.to_dataframe()
    t.topology = md.Topology.from_dataframe(top, bonds)
    # The value is intentionally unused: the conversion itself is the test.
    omm_top = t.top.to_openmm()
def test_n_bonds():
    """Bond counts per element fall in chemically sensible ranges."""
    valid_counts = {'H': (1,), 'C': (3, 4), 'O': (1, 2)}
    traj = md.load(get_fn('2EQQ.pdb'))
    for atom in traj.top.atoms:
        expected = valid_counts.get(atom.element.symbol)
        if expected is not None:
            assert atom.n_bonds in expected
def test_load_unknown_topology():
    """Loading with an unsupported topology format raises an informative IOError."""
    try:
        md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
    except IOError as e:
        # we want to make sure there's a nice error message than includes
        # a list of the supported topology formats.
        assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
    else:
        assert False  # fail: no exception was raised
def test_unique_pairs():
    """The specialized pair generators agree with the generic one."""
    n = 10
    a = np.arange(n)
    b = np.arange(n, n + n)
    # Bug fix: the original called ``.sort()`` on each result, but both
    # list.sort() and ndarray.sort() sort in place and return None, so the
    # assertions compared None with None and could never fail. Compare the
    # sorted contents instead.
    eq(sorted(map(tuple, md.Topology._unique_pairs(a, a))),
       sorted(map(tuple, md.Topology._unique_pairs_equal(a))))
    eq(sorted(map(tuple, md.Topology._unique_pairs(a, b))),
       sorted(map(tuple, md.Topology._unique_pairs_mutually_exclusive(a, b))))
def test_select_pairs():
    """select_pairs counts and treats equivalent selection spellings identically."""
    traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
    select_pairs = traj.top.select_pairs
    # 258 waters: O-O pairs are combinations, H1-O pairs are a full product.
    assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
    assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
    # The iterator below is consumed two tuples per loop iteration: the
    # first tuple (via the for statement) and the second (via next()) must
    # produce the same set of pairs.
    selections = iter([
        # Equal
        ("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
        ('all', 'all'),
        # Exclusive
        ('name O', 'name H1'),
        ('name H1', 'name O'),
        # Overlap
        (range(traj.n_atoms), 'name O'),
        ('all', 'name O')])
    for select1, select2 in selections:
        select3, select4 = next(selections)
        assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
                  select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta():
    """Chain 0 of 2EQQ translates to the expected FASTA sequence."""
    expected_sequence = "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
    traj = md.load(get_fn('2EQQ.pdb'))
    assert traj.topology.to_fasta(0) == expected_sequence
def test_subset():
    """Subsetting three atoms of one residue yields a 1-residue topology."""
    full_top = md.load(get_fn('2EQQ.pdb')).top
    atom_indices = [1, 2, 3]
    assert full_top.subset(atom_indices).n_residues == 1
def test_molecules():
    """find_molecules() covers every atom and finds exactly two multi-atom molecules."""
    topology = md.load(get_fn('4OH9.pdb')).topology
    mols = topology.find_molecules()
    assert sum(len(m) for m in mols) == topology.n_atoms
    # All but two molecules are water
    multi_atom = [m for m in mols if len(m) > 1]
    assert len(multi_atom) == 2
| lgpl-2.1 |
costypetrisor/scikit-learn | sklearn/metrics/ranking.py | 5 | 24965 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general function that works on arbitrary curve points. For
    the area under an ROC curve specifically, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as
        for an ROC curve. If the curve is non-ascending, the result will be
        wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    if reorder:
        # Sort by x, breaking ties with y, so the integral is well defined.
        sort_idx = np.lexsort((y, x))
        x = x[sort_idx]
        y = y[sort_idx]
        direction = 1
    else:
        # Without reordering, x must be monotonic; a decreasing x simply
        # flips the sign of the integral.
        deltas = np.diff(x)
        if not np.any(deltas < 0):
            direction = 1
        elif np.all(deltas <= 0):
            direction = -1
        else:
            raise ValueError("Reordering is not turned on, and "
                             "the x array is not increasing: %s" % x)
    return direction * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores.

    The score is the area under the precision-recall curve. This
    implementation is restricted to binary and multilabel classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy across labels; ``None`` returns per-class scores,
        'micro' pools all label/sample pairs, 'macro' is the unweighted mean
        over labels, 'weighted' weights labels by support, and 'samples'
        averages over instances.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    def _ap_for_one_label(y_true, y_score, sample_weight=None):
        # AP for a single label is the area under its PR curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)
    return _average_binary_score(_ap_for_one_label, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores.

    Restricted to the binary classification task or multilabel
    classification in label indicator format.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy across labels; ``None`` returns per-class scores,
        'micro' pools all label/sample pairs, 'macro' is the unweighted mean
        over labels, 'weighted' weights labels by support, and 'samples'
        averages over instances.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)
    """
    def _auc_for_one_label(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined unless both classes appear in y_true.
        if np.unique(y_true).size != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)
    return _average_binary_score(_auc_for_one_label, y_true, y_score,
                                 average, sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values
    # (mergesort is stable, keeping tied scores in their original order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # Scalar weight of 1: broadcasting makes (y_true * weight) the
        # unweighted count below.
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # Unweighted: positives + negatives at threshold i is just the
        # (1-based) sample count, so fps falls out by subtraction.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds.

    Restricted to the binary classification task. Precision is
    ``tp / (tp + fp)`` and recall is ``tp / (tp + fn)``. The last precision
    and recall values are 1. and 0. respectively and do not have a
    corresponding threshold, so the graph starts on the x axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight)
    precision = tps / (tps + fps)
    recall = tps / tps[-1]
    # Truncate at the first threshold achieving full recall, then reverse so
    # recall is decreasing; finally append the conventional terminal point
    # (precision=1, recall=0) which has no threshold.
    full_recall_idx = tps.searchsorted(tps[-1])
    rev = slice(full_recall_idx, None, -1)
    return (np.r_[precision[rev], 1],
            np.r_[recall[rev], 0],
            thresholds[rev])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)

    Note: this implementation is restricted to the binary classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}. If labels are not
        binary, pos_label should be explicitly given.

    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.

    pos_label : int
        Label considered as positive and others are considered negative.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].

    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].

    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])

    """
    # fps/tps are per-threshold cumulative (weighted) false/true positive
    # counts, one entry per distinct decision threshold.
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position if necessary so that the curve
        # starts at (fpr=0, tpr=0); the synthetic threshold is placed above
        # the highest score, where nothing is predicted positive.
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    if fps[-1] <= 0:
        # fps[-1] is the total (weighted) number of negatives; with no
        # negatives the false positive rate is undefined.
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]

    if tps[-1] <= 0:
        # tps[-1] is the total (weighted) number of positives.
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]

    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.

    The obtained score is always strictly greater than 0 and
    the best value is 1.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...

    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formated array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))

    # CSR layout gives, per row, the relevant label indices via
    # indices[indptr[i]:indptr[i + 1]].
    y_true = csr_matrix(y_true)
    # Negate scores so that rankdata below assigns rank 1 to the label with
    # the *highest* original score.
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    out = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]

        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
            continue

        scores_i = y_score[i]
        # rank: rank of each relevant label among *all* labels;
        # L: rank of each relevant label among the relevant labels only.
        # L / rank is then the precision at that label's rank, averaged
        # over the relevant labels ('max' breaks ties pessimistically).
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        out += (L / rank).mean()

    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure

    Measures how deep into the score-ordered label ranking one must go, on
    average, to cover every true label of a sample. The best achievable
    value equals the average number of labels in ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Hide the scores of irrelevant labels, then find, for each sample, the
    # lowest score among its relevant labels.
    relevant_scores = np.ma.masked_array(y_score,
                                         mask=np.logical_not(y_true))
    min_relevant = relevant_scores.min(axis=1)[:, np.newaxis]

    # A sample's coverage depth is the number of labels scoring at least as
    # high as its worst-scoring relevant label. Samples with no relevant
    # label stay masked and are filled in with a depth of 0.
    depths = (y_score >= min_relevant).sum(axis=1)
    depths = np.ma.filled(depths, 0)

    return np.average(depths, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    n_samples, n_labels = y_true.shape

    # CSR layout gives, per row, the relevant label indices via
    # indices[indptr[i]:indptr[i + 1]].
    y_true = csr_matrix(y_true)

    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # Histograms over the distinct score values (ascending): how many
        # true labels, and how many labels overall, take each score.
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank

        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)

    n_positives = count_nonzero(y_true, axis=1)
    # Normalize each sample by its number of (relevant, irrelevant) pairs;
    # division by zero is silenced here and fixed up just below.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)

    # When there is no positive or no negative labels, those values should
    # be consider as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.

    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
camsas/qjump-nsdi15-plotting | figure1c_3c/plot_naiad_latency_cdfs.py | 2 | 7009 | # Copyright (c) 2015, Malte Schwarzkopf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple script which takes a file with one Naiad barrier latency (expressed as
# a signed integer) per line and plots a trivial histogram.
import os, sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from utils import *
#-----------
def serialize_hist(n, bins, outdir):
  """Write histogram data to text files in *outdir*.

  The bin values in *n* go to ``<outdir>/binvals.txt`` and the bin edges
  in *bins* go to ``<outdir>/bins.txt``, one value per line.
  """
  # Use context managers so both files are flushed and closed even on
  # error (the original left the handles open — a resource leak).
  with open(outdir + "/binvals.txt", "w") as nf:
    for val in n:
      nf.write(str(val)+"\n")
  with open(outdir + "/bins.txt", "w") as bf:
    for val in bins:
      bf.write(str(val)+"\n")
#-----------
# ---------- command-line parsing ----------
if len(sys.argv) < 2:
  print "usage: plot_naiad_latency_cdfs.py <input file 1> <label 1> ... " \
        "<input file n> <label n> [output file]"

paper_mode = True

# An odd number of arguments means the final one is the output base name;
# strip it off so only (file, label) pairs remain.
if (len(sys.argv) - 1) % 2 != 0:
  outname = sys.argv[-1]
  del sys.argv[-1]
else:
  outname = "naiad_latency"

# Remaining arguments come in (input file, label) pairs.
fnames = []
labels = []
for i in range(0, len(sys.argv) - 1, 2):
  fnames.append(sys.argv[1 + i])
  labels.append(sys.argv[2 + i])
# ---------- figure setup ----------
# In paper mode, use the small figure size and rc settings for camera-ready
# output (set_paper_rcs/paper_colors come from the local utils module).
if paper_mode:
  fig = plt.figure(figsize=(2.33,1.55))
  set_paper_rcs()
else:
  fig = plt.figure()
  set_rcs()

if paper_mode:
  colors = paper_colors
#  colors[2] = paper_colors[1]
#  colors[1] = paper_colors[3]
else:
  colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', '0.5']
# ---------- per-input-file analysis and CDF plotting ----------
i = 0
outliers_ignored = 0
for f in fnames:
  # initial info
  print "Analyzing %s:" % (f)

  # parsing: one latency value per line; scale by 1000 (presumably
  # ms -> us — the x-axis label below is in microseconds; TODO confirm
  # input units) and drop outliers beyond 90 ms.
  j = 0
  values = []
  for line in open(f).readlines():
    delay = float(line.strip()) * 1000
    if delay < 90000: # 90ms
      values.append(delay)
    else:
      outliers_ignored += 1
    j += 1

  # info output: summary statistics for this input file.
  print "--------------------------------------"
  print "%s (%s)" % (labels[i], f)
  print "--------------------------------------"
  print "%d total samples" % (j)
  print "%d outliers ignored" % (outliers_ignored)
  print "--------------------------------------"
  avg = np.mean(values)
  print "AVG: %f" % (avg)
  median = np.median(values)
  print "MEDIAN: %f" % (median)
  min_val = np.min(values)
  print "MIN: %ld" % (min_val)
  max_val = np.max(values)
  print "MAX: %ld" % (max_val)
  stddev = np.std(values)
  print "STDEV: %f" % (stddev)
  print "PERCENTILES:"
  perc1 = np.percentile(values, 1)
  print " 1st: %f" % (perc1)
  perc10 = np.percentile(values, 10)
  print " 10th: %f" % (np.percentile(values, 10))
  perc25 = np.percentile(values, 25)
  print " 25th: %f" % (np.percentile(values, 25))
  perc50 = np.percentile(values, 50)
  print " 50th: %f" % (np.percentile(values, 50))
  perc75 = np.percentile(values, 75)
  print " 75th: %f" % (np.percentile(values, 75))
  perc90 = np.percentile(values, 90)
  print " 90th: %f" % (np.percentile(values, 90))
  perc99 = np.percentile(values, 99)
  print " 99th: %f" % (np.percentile(values, 99))
#  print "COPYABLE:"
#  print avg
#  print stddev
#  print max_val
#  print min_val
#  print perc1
#  print perc10
#  print perc25
#  print perc50
#  print perc75
#  print perc90
#  print perc99

  # figure out number of bins based on range
  bin_width = 1 # 7.5ns measurement accuracy
  bin_range = max_val - min_val
  num_bins = min(100000, bin_range / bin_width)
  print "Binning into %d bins and plotting..." % (num_bins)

  # plotting: draw the normalized cumulative histogram (empirical CDF)
  # as a step line; line style cycles solid/dashed/dotted in paper mode.
  if paper_mode:
#    plt.rc("font", size=8.0)
    if i % 3 == 0:
      style = 'solid'
    elif i % 3 == 1:
      style = 'dashed'
    else:
      style = 'dotted'
    (n, bins, patches) = plt.hist(values, bins=num_bins, log=False, normed=True,
                                  cumulative=True, histtype="step",
                                  color=paper_colors[i % len(paper_colors)],
                                  linestyle=style)
    # hack to remove vertical bar
    patches[0].set_xy(patches[0].get_xy()[:-1])
    # hack to add line to legend (hist step patches don't show there, so
    # plot an off-screen point carrying the label and style)
    plt.plot([-100], [-100], label=labels[i],
             color=paper_colors[i % len(paper_colors)],
             linestyle=style, lw=1.0)
#    serialize_hist(n, bins, os.path.dirname(outname))
  else:
    (n, bins, patches) = plt.hist(values, bins=num_bins, log=False, normed=True,
                                  cumulative=True, histtype="step",
                                  label=labels[i])
    # hack to remove vertical bar
    patches[0].set_xy(patches[0].get_xy()[:-1])
#    serialize_hist(n, bins, os.path.dirname(outname))
  i += 1
# ---------- axis formatting and output ----------
#plt.xticks(rotation=45)
# plt.xscale("log")
plt.xticks(range(0, 2001, 500), [str(x) for x in range(0, 2001, 500)])
plt.yticks(np.arange(0.0, 1.01, 0.2), [str(x) for x in np.arange(0.0, 1.01, 0.2)])
plt.xlim(0, 2250)
plt.ylim(0, 1.0)
plt.xlabel("Barrier sync.~latency [$\mu$s]")
#plt.ylabel("Cumulative distribution of latency")
#print n
#print bins
plt.legend(loc=4, frameon=False, borderaxespad=0.5, handlelength=2.5,
           handletextpad=0.2)
#plt.legend(loc=8)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# First figure: the full CDF.
plt.savefig("%s.pdf" % outname, format="pdf", bbox_inches="tight")

# Second figure: zoom into the 90th-100th percentile tail and re-draw the
# legend without a visible frame.
plt.ylim(0.90, 1.0)
#plt.legend(bbox_to_anchor=(-0.2, 1.02, 1.3, .102), loc=3, ncol=3, mode="expand",
#           borderaxespad=0., handlelength=2.5, handletextpad=0.2)
plt.legend(loc='lower right', frameon=False, borderaxespad=0.2,
           handlelength=2.5, handletextpad=0.2)
leg = plt.gca().get_legend()
frame = leg.get_frame()
frame.set_edgecolor('1.0')
frame.set_alpha(0.0)
plt.yticks(np.arange(0.9, 1.01, 0.02),
           [str(x) for x in np.arange(0.9, 1.01, 0.02)])
#plt.axhline(0.999, ls='--', color='k')
plt.savefig("%s-99th.pdf" % outname, format="pdf", bbox_inches="tight")
| bsd-3-clause |
zaxtax/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
# Load the 10-class handwritten digits dataset: X holds the flattened
# 8x8 images, y the digit labels.
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape

# Fix the RNG seed so the random image shifts below are reproducible.
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending randomly shifted copies of the images.

    Each 64-element row of ``X`` is treated as an 8x8 image and displaced by
    a small random sub-pixel offset; the shifted images are stacked below
    the originals and the labels repeated to match.
    """
    # Having a larger dataset shows more clearly the behavior of the
    # methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods are strongly
    # super-linear in n_samples
    def displace(flat_image):
        # Shift the 8x8 image by a random offset and flatten it back.
        offset = .3 * np.random.normal(size=2)
        return ndimage.shift(flat_image.reshape((8, 8)), offset,
                             mode='constant').ravel()

    shifted = np.apply_along_axis(displace, 1, X)
    return np.concatenate([X, shifted]), np.concatenate([y, y], axis=0)
# Double the dataset with randomly shifted copies of the digit images.
X, y = nudge_images(X, y)


#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    """Scatter the 2D embedding, drawing each point as its true digit.

    Parameters
    ----------
    X_red : 2D embedding of the samples, shape (n_samples, 2).
    X : original data (unused in the body; kept for signature stability).
    labels : cluster assignment per sample; used only for coloring.
    title : optional figure title.

    Note: the character drawn for each point is the *true* label from the
    module-level ``y``, while the color encodes the cluster label.
    """
    # Rescale the embedding to the unit square before plotting.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)

    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        # Color from the cluster id; labels[i] / 10. assumes at most 10
        # clusters (n_clusters=10 below).
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")

from sklearn.cluster import AgglomerativeClustering

# Fit and visualize each linkage strategy on the same 2D embedding,
# reporting the wall-clock fit time for comparison.
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))

    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)


plt.show()
| bsd-3-clause |
CarterBain/AlephNull | alephnull/transforms/batch_transform.py | 1 | 16947 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generator versions of transforms.
"""
import functools
import logbook
import numpy
from numbers import Integral
import pandas as pd
from alephnull.utils.data import RollingPanel
from alephnull.protocol import Event
from alephnull.finance import trading
from . utils import check_window_length
log = logbook.Logger('BatchTransform')
# Map from bar field name to the pandas aggregation used when
# downsampling minute bars to daily bars.
func_map = {'open_price': 'first',
            'close_price': 'last',
            'low': 'min',
            'high': 'max',
            'volume': 'sum'
            }


def get_sample_func(item):
    """Return the aggregation name used to downsample field *item*.

    Known OHLCV fields get their natural aggregation from ``func_map``;
    any other field falls back to taking the last observed value.
    """
    # Idiomatic dict lookup with default instead of if/else membership test.
    return func_map.get(item, 'last')
def downsample_panel(minute_rp, daily_rp, mkt_close):
    """
    @minute_rp is a rolling panel, which should have minutely rows
    @daily_rp is a rolling panel, which should have daily rows
    @mkt_close is the market close timestamp of the day being finalized

    Using the history in minute_rp, a new daily bar is created by
    downsampling. The data from the daily bar is then added to the
    daily rolling panel using add_frame.
    """
    cur_panel = minute_rp.get_current()
    sids = minute_rp.minor_axis
    # One column per sid, one row per field (open/close/low/high/volume...).
    day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)
    # dt1: midnight of the current trading day; dt2: the next trading day.
    dt1 = trading.environment.normalize_date(mkt_close)
    dt2 = trading.environment.next_trading_day(mkt_close)
    by_close = functools.partial(get_date, mkt_close, dt1, dt2)
    for item in minute_rp.items:
        frame = cur_panel[item]
        # Each field is aggregated with its natural function
        # (first/last/min/max/sum, see get_sample_func).
        func = get_sample_func(item)

        # group by trading day, using the market close of the current
        # day. If events occurred after the last close (yesterday) but
        # before today's close, group them into today.
        dframe = frame.groupby(lambda d: by_close(d)).agg(func)

        for stock in sids:
            day_frame[stock][item] = dframe[stock].ix[dt1]

    # store the frame at midnight instead of the close
    daily_rp.add_frame(dt1, day_frame)
def get_date(mkt_close, d1, d2, d):
    """Bucket timestamp *d* relative to the market close.

    Returns *d2* (the next trading day) when *d* falls after *mkt_close*,
    otherwise *d1* (the current trading day).
    """
    return d2 if d > mkt_close else d1
class BatchTransform(object):
    """Base class for batch transforms with a trailing window of
    variable length. As opposed to pure EventWindows that get a stream
    of events and are bound to a single SID, this class creates stream
    of pandas DataFrames with each column representing a sid.

    There are two ways to create a new batch window:
    (i) Inherit from BatchTransform and overload get_value(data).
        E.g.:
        ```
        class MyBatchTransform(BatchTransform):
            def get_value(self, data):
               # compute difference between the means of sid 0 and sid 1
               return data[0].mean() - data[1].mean()
        ```

    (ii) Use the batch_transform decorator.
        E.g.:
        ```
        @batch_transform
        def my_batch_transform(data):
            return data[0].mean() - data[1].mean()
        ```

    In your algorithm you would then have to instantiate
    this in the initialize() method:
    ```
    self.my_batch_transform = MyBatchTransform()
    ```

    To then use it, inside of the algorithm handle_data(), call the
    handle_data() of the BatchTransform and pass it the current event:
    ```
    result = self.my_batch_transform(data)
    ```
    """

    def __init__(self,
                 func=None,
                 refresh_period=0,
                 window_length=None,
                 clean_nans=True,
                 sids=None,
                 fields=None,
                 compute_only_full=True,
                 bars='daily',
                 downsample=False):
        """Instantiate new batch_transform object.

        :Arguments:
            func : python function <optional>
               If supplied will be called after each refresh_period
               with the data panel and all args and kwargs supplied
               to the handle_data() call.
            refresh_period : int
               Interval to wait between advances in the window.
            window_length : int
               How many days the trailing window should have.
            clean_nans : bool <default=True>
               Whether to (forward) fill in nans.
            sids : list <optional>
               Which sids to include in the moving window. If not
               supplied sids will be extracted from incoming
               events.
            fields : list <optional>
               Which fields to include in the moving window
               (e.g. 'price'). If not supplied, fields will be
               extracted from incoming events.
            compute_only_full : bool <default=True>
               Only call the user-defined function once the window is
               full. Returns None if window is not full yet.
            downsample : bool <default=False>
               If true, downsample bars to daily bars. Otherwise, do nothing.
        """
        # The transform computation: either the supplied function or the
        # subclass's get_value override.
        if func is not None:
            self.compute_transform_value = func
        else:
            self.compute_transform_value = self.get_value

        self.clean_nans = clean_nans
        self.compute_only_full = compute_only_full
        # no need to down sample if the bars are already daily
        self.downsample = downsample and (bars == 'minute')

        # How many bars are in a day
        self.bars = bars
        if self.bars == 'daily':
            self.bars_in_day = 1
        elif self.bars == 'minute':
            # 6.5 hours of trading per day.
            self.bars_in_day = int(6.5 * 60)
        else:
            raise ValueError('%s bars not understood.' % self.bars)

        # The following logic is to allow pre-specified sid filters
        # to operate on the data, but to also allow new symbols to
        # enter the batch transform's window IFF a sid filter is not
        # specified.
        if sids is not None:
            if isinstance(sids, (basestring, Integral)):
                self.static_sids = set([sids])
            else:
                self.static_sids = set(sids)
        else:
            self.static_sids = None

        self.initial_field_names = fields
        if isinstance(self.initial_field_names, basestring):
            self.initial_field_names = [self.initial_field_names]
        self.field_names = set()

        self.refresh_period = refresh_period

        check_window_length(window_length)
        self.window_length = window_length

        self.trading_days_total = 0
        self.window = None

        self.full = False
        # Set to -inf essentially to cause update on first attempt.
        self.last_dt = pd.Timestamp('1900-1-1', tz='UTC')

        self.updated = False
        self.cached = None
        self.last_args = None
        self.last_kwargs = None

        # Data panel that provides bar information to fill in the window,
        # when no bar ticks are available from the data source generator
        # Used in universes that 'rollover', e.g. one that has a different
        # set of stocks per quarter
        self.supplemental_data = None

        # Created lazily on the first event (see _init_panels), once the
        # fields/sids to track are known.
        self.rolling_panel = None
        self.daily_rolling_panel = None

    def handle_data(self, data, *args, **kwargs):
        """
        Point of entry. Process an event frame.
        """
        # extract dates
        dts = [event.datetime for event in data._data.itervalues()]
        # we have to provide the event with a dt. This is only for
        # checking if the event is outside the window or not so a
        # couple of seconds shouldn't matter. We don't add it to
        # the data parameter, because it would mix dt with the
        # sid keys.
        event = Event()
        event.dt = max(dts)
        event.data = {k: v.__dict__ for k, v in data._data.iteritems()
                      # Need to check if data has a 'length' to filter
                      # out sids without trade data available.
                      # TODO: expose more of 'no trade available'
                      # functionality to zipline
                      if len(v)}

        # only modify the trailing window if this is
        # a new event. This is intended to make handle_data
        # idempotent.
        if self.last_dt < event.dt:
            self.updated = True
            self._append_to_window(event)
        else:
            self.updated = False

        # return newly computed or cached value
        return self.get_transform_value(*args, **kwargs)

    def _init_panels(self, sids):
        # Lazily create the rolling panel(s): with downsampling we keep a
        # minute-resolution panel covering one day plus a daily panel for
        # the full window; otherwise a single panel holds the whole window.
        if self.downsample:
            self.rolling_panel = RollingPanel(self.bars_in_day,
                                              self.field_names, sids)

            self.daily_rolling_panel = RollingPanel(self.window_length,
                                                    self.field_names, sids)
        else:
            self.rolling_panel = RollingPanel(self.window_length *
                                              self.bars_in_day,
                                              self.field_names, sids)

    def _append_to_window(self, event):
        self.field_names = self._get_field_names(event)

        if self.static_sids is None:
            sids = set(event.data.keys())
        else:
            sids = self.static_sids

        # the panel sent to the transform code will have
        # columns masked with this set of sids. This is how
        # we guarantee that all (and only) the sids sent to the
        # algorithm's handle_data and passed to the batch
        # transform. See the get_data method to see it applied.
        # N.B. that the underlying panel grows monotonically
        # if the set of sids changes over time.
        self.latest_sids = sids
        # Create rolling panel if not existant
        if self.rolling_panel is None:
            self._init_panels(sids)

        # Store event in rolling frame
        self.rolling_panel.add_frame(event.dt,
                                     pd.DataFrame(event.data,
                                                  index=self.field_names,
                                                  columns=sids))

        # update trading day counters
        # we may get events from non-trading sources which occurr on
        # non-trading days. The book-keeping for market close and
        # trading day counting should only consider trading days.
        if trading.environment.is_trading_day(event.dt):
            _, mkt_close = trading.environment.get_open_and_close(event.dt)
            if self.bars == 'daily':
                # Daily bars have their dt set to midnight.
                mkt_close = trading.environment.normalize_date(mkt_close)
            if event.dt == mkt_close:
                if self.downsample:
                    # A trading day just completed: collapse the minute
                    # panel into a single daily bar.
                    downsample_panel(self.rolling_panel,
                                     self.daily_rolling_panel,
                                     mkt_close
                                     )
                self.trading_days_total += 1
            self.mkt_close = mkt_close

        self.last_dt = event.dt

        if self.trading_days_total >= self.window_length:
            self.full = True

    def get_transform_value(self, *args, **kwargs):
        """Call user-defined batch-transform function passing all
        arguments.

        Note that this will only call the transform if the datapanel
        has actually been updated. Otherwise, the previously, cached
        value will be returned.
        """
        if self.compute_only_full and not self.full:
            return None

        #################################################
        # Determine whether we should call the transform
        # 0. Support historical/legacy usage of '0' signaling,
        #    'update on every bar'
        if self.refresh_period == 0:
            period_signals_update = True
        else:
            # 1. Is the refresh period over?
            period_signals_update = (
                self.trading_days_total % self.refresh_period == 0)
        # 2. Have the args or kwargs been changed since last time?
        args_updated = args != self.last_args or kwargs != self.last_kwargs
        # 3. Is this a downsampled batch, and is the last event mkt close?
        downsample_ready = not self.downsample or \
            self.last_dt == self.mkt_close

        recalculate_needed = downsample_ready and \
            (args_updated or (period_signals_update and self.updated))
        ###################################################

        if recalculate_needed:
            self.cached = self.compute_transform_value(
                self.get_data(),
                *args,
                **kwargs
            )

        self.last_args = args
        self.last_kwargs = kwargs
        return self.cached

    def get_data(self):
        """Create a pandas.Panel (i.e. 3d DataFrame) from the
        events in the current window.

        Returns:
        The resulting panel looks like this:
        index : field_name (e.g. price)
        major axis/rows : dt
        minor axis/colums : sid
        """
        if self.downsample:
            data = self.daily_rolling_panel.get_current()
        else:
            data = self.rolling_panel.get_current()

        if self.supplemental_data:
            # Overlay externally supplied bars for any (item, dt) cells
            # missing from the streamed data.
            for item in data.items:
                if item not in self.supplemental_data.items:
                    continue
                for dt in data.major_axis:
                    try:
                        supplemental_for_dt = self.supplemental_data.ix[
                            item, dt, :]
                    except KeyError:
                        # Only filling in data available in supplemental data.
                        supplemental_for_dt = None

                    if supplemental_for_dt is not None:
                        data[item].ix[dt] = \
                            supplemental_for_dt.combine_first(
                                data[item].ix[dt])

        # screen out sids no longer in the multiverse
        data = data.ix[:, :, self.latest_sids]

        if self.clean_nans:
            # Fills in gaps of missing data during transform
            # of multiple stocks. E.g. we may be missing
            # minute data because of illiquidity of one stock
            data = data.fillna(method='ffill')

        # Hold on to a reference to the data,
        # so that it's easier to find the current data when stepping
        # through with a debugger
        self._curr_data = data

        return data

    def get_value(self, *args, **kwargs):
        """Subclass hook: compute the transform from the data panel."""
        raise NotImplementedError(
            "Either overwrite get_value or provide a func argument.")

    def __call__(self, f):
        # Allows an instance to be used as a decorator: the wrapped
        # function becomes the transform computation.
        self.compute_transform_value = f
        return self.handle_data

    def _extract_field_names(self, event):
        # extract field names from sids (price, volume etc), make sure
        # every sid has the same fields.
        sid_keys = []
        for sid in event.data.itervalues():
            # Only keep numeric fields.
            keys = set([name for name, value in sid.items()
                        if isinstance(value,
                                      (int,
                                       float,
                                       numpy.integer,
                                       numpy.float,
                                       numpy.long))
                        ])
            sid_keys.append(keys)

        # with CUSTOM data events, there may be different fields
        # per sid. So the allowable keys are the union of all events.
        union = set.union(*sid_keys)
        # Drop bookkeeping fields that are not market data.
        unwanted_fields = set(['portfolio', 'sid', 'dt', 'type',
                               'datetime', 'source_id'])
        return union - unwanted_fields

    def _get_field_names(self, event):
        if self.initial_field_names is not None:
            return self.initial_field_names
        else:
            # No explicit field filter: track the union of all numeric
            # fields seen so far across events.
            self.latest_names = self._extract_field_names(event)
            return set.union(self.field_names, self.latest_names)
def batch_transform(func):
    """Turn *func* into a factory of BatchTransform windows.

    Decorator alternative to subclassing BatchTransform: the decorated
    function is handed to BatchTransform as its ``func`` argument, so it is
    invoked in place of ``get_value`` whenever the window recomputes. See
    the BatchTransform docstring for a usage example.
    """
    @functools.wraps(func)
    def make_batch_transform(*args, **kwargs):
        # Forward all construction arguments, injecting the user function
        # as the transform computation.
        return BatchTransform(*args, func=func, **kwargs)

    return make_batch_transform
| apache-2.0 |
ntbrewer/DAQ_1 | kick-u3/KickPlot.py | 1 | 4859 | #!/usr/bin/python3
# /bin/python3
# #####################################################
# This script makes plots of the mtc cycle
# written to the labjack outputs for control and monitoring.
# This script can be used anywhere by changing the
# location of the python3 directive after #!.
# usage: ./KickPlot.py and use default
# or: ./KickPlot.py file_name
# Based on TempPlot.py in kelvin
# N.T. BREWER 11-26-19
# ######################################################
# IMPORTS ---------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pexpect as pxp
import time
import numpy as np
import sys
#import binascii as ba
# -----------------------------------------------------------------------------
# DEFINITIONS -----------------------------------------------------------------
def getFileName():
    """Locate the most recent log file under ./log/, with user fallback.

    Returns the path 'log/<name>' of the newest log (per 'ls -rlt'), or the
    name the user types in, or a hard-coded default when the user enters
    nothing.
    """
    try:
        # 'ls -rlt' sorts oldest-first, so the last listing entry is the
        # newest file in log/.
        s = pxp.run('ls -rlt log/').split()[-1].decode()
        print(s)
    except IndexError:
        # Empty or missing log directory: ask the user for a file name.
        print('file not found: input filename')
        s = input()
    if s != '':
        return 'log/' + s
    file_name = 'log/kick-TueNov2610:58:522019.log'
    print('Warning, standard usage is: ./KickPlot.py file_name\n' +
          'using default file name:' + file_name)
    # BUG FIX: this branch previously fell through without returning, so the
    # caller got None and later crashed on open(None). Return the default.
    return file_name
def getLines(filename):
    """Return every line of *filename* as a list (newlines preserved).

    The whole file is read into memory because callers slice the result,
    so a list rather than a lazy iterator is required.
    """
    # 'with' guarantees the handle is closed even if readlines() raises,
    # unlike the previous open()/close() pair.
    with open(filename, "r") as inf:
        return inf.readlines()
# -----------------------------------------------------------------------------
# READ IN FILE ----------------------------------------------------------------
# Choose the log file: explicit command-line argument wins, otherwise fall
# back to auto-discovery via getFileName().
try:
file_name = sys.argv[1]
# file_name = 'therm-TueAug1408:51:322018.log'
except IndexError:
file_name = getFileName()
inf = open(file_name)
lines = inf.readlines()
blocks = []
# -----------------------------------------------------------------------------
# PULL DATE FROM FILE ---------------------------------------------------------
# The acquisition start date is encoded in the file name between 'kick-' and
# '.log'; fall back to asking the user if it is not there.
date = file_name.split('kick-')[-1].split('.log')[0]
if len(date.split(':')) and len(date.split(':')) < 3:
date = input('date not found in file name. ' +
'Input date as datetime format %m%d%H%M%S%Y' +
'i.e. Sep. 9th 2015 at 11:11:03pm is 09092311032015: ')
# NOTE(review): 62135665200.0 looks like an offset from the Unix epoch to
# seconds since year 1 (plus a timezone shift) -- confirm before relying on d.
s = time.mktime(time.strptime(date, '%m%d%H%M%S%Y')) + 62135665200.0
d = s/(86400)
else:
if len(date.split(':')[0]) == 9:
date = date[0:6] + '0' + date[6:]
# NOTE(review): the same padding check is applied twice back to back --
# verify the second pass is intentional.
if len(date.split(':')[0]) == 9:
date = date[0:6] + '0' + date[6:]
s = time.mktime(time.strptime(date, '%a%b%d%H:%M:%S%Y')) + 62135665200.0
# s = time.mktime(time.strptime(date, '%a%b%d%H_%M_%S%Y')) + 62135665200.0
# seconds -> days
d = s/(86400)
print(d)
print('start date ' + str(date))
# -----------------------------------------------------------------------------
# PLOT DATA POINTS ------------------------------------------------------------
# Displays plot if the line below is set to True
dispPlot = True
#dispPlot = False
# Plots just the last 24 hours if line below is set to True
#lastDay = False
lastDay = True
# Keep at most the last ~2000 tab-separated records; the first 3 lines of the
# log are header lines and are always skipped.
if len(lines) > 2000 and lastDay:
for i in lines[3+len(lines)-2000:]:
blocks.append(i.split('\t'))
else:
for i in lines[3:]:
blocks.append(i.split('\t'))
tdat = []
edat = []
cdat = []
fdat = []
# Each record: timestamp plus three hex bitmask bytes; expand every byte into
# its 8 individual bit characters.
# NOTE(review): eval() on file contents is fine for trusted logs but unsafe on
# untrusted input -- float()/int() would be safer.
for i in blocks:
tdat.append(eval(i[0]))
edat.append(list(bin(int(i[1],16))[2:].zfill(8)))
cdat.append(list(bin(int(i[2],16))[2:].zfill(8)))
fdat.append(list(bin(int(i[3],16))[2:].zfill(8)))
# Convert the bit characters '0'/'1' into integers in place.
for i in range(0,len(edat)):
for j in range(0,8):
edat[i][j]=eval(edat[i][j])
cdat[i][j]=eval(cdat[i][j])
fdat[i][j]=eval(fdat[i][j])
earr = np.array(edat)
carr = np.array(cdat)
#fig = plt.figure()
#fig.autofmt_xdate()
legList = ['meas On','meas Off', 'bkg On', 'bkg Off', 'kick', 'beam Off','tape move','tape Off','trig', 'beam On', 'lite On', 'lite Off']
ax = list(range(0,12))
#plt.subplots(12,1,sharex='all')
# One stacked subplot per status bit, all sharing the time axis of the first.
ax[0] = plt.subplot(12,1,1)
for i in range(0,8):
ax[i] = plt.subplot(12,1,i+1,sharex=ax[0])
plt.plot(tdat, earr[:,-(i+1)], drawstyle='steps-post')
plt.plot(tdat, earr[:,-(i+1)], 'ko')
plt.legend([legList[i]])
plt.ylim(-.5,1.5)
for i in range(0,4):
ax[i] = plt.subplot(12,1,i+9,sharex=ax[0])
#plt.plot(tdat, earr[:,i], '-')
plt.plot(tdat, carr[:,-(i+1)], drawstyle='steps-post')
plt.plot(tdat, carr[:,-(i+1)], 'bo')
plt.legend([legList[i+8]])
plt.ylim(-.5,1.5)
# Scatter plots of selected bit pairs (quick correlation checks).
plt.figure()
plt.plot(earr[:,-1],earr[:,-2],'ko')
plt.figure()
plt.plot(earr[:,-3],earr[:,-4],'bo')
plt.figure()
plt.plot(earr[:,-7],earr[:,-8],'ro')
plt.show()
# NOTE(review): savefig() after show() usually writes an empty figure in
# interactive backends -- confirm the intended call order.
plt.savefig('report.png')
if dispPlot:
plt.show()
# print('done')
# -----------------------------------------------------------------------------
| gpl-3.0 |
henridwyer/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
# Fixed starting configuration so a single SMACOF iteration is deterministic.
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
# Expected coordinates after one iteration, taken from the textbook example.
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# smacof must reject malformed similarity matrices and bad init shapes.
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
# init has 3 rows for a 4-point similarity matrix -> shape mismatch.
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
# Smoke test: non-metric MDS with a precomputed dissimilarity matrix (and
# n_jobs > 1) must fit without raising.
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
wclark3/machine-learning | final-project/md_sandbox/main.py | 1 | 3481 | #!/usr/bin/env python
import abc
import operator
import time
import lasagne
import numpy as np
import theano
import theano.tensor as T
from sklearn.metrics import confusion_matrix
import fileio
import perceptron
# class Batch:
# def __init__(self, batchsize, shuffle):
# self.batchsize = batchsize
# self.shuffle = shuffle
# def __accumulate(iterable, func=operator.add):
# 'Return running totals'
# # accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# # accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
# it = iter(iterable)
# try:
# total = next(it)
# except StopIteration:
# return
# yield total
# for element in it:
# total = func(total, element)
# yield total
# def Iterate(inputs, targets, fn):
# __accumulate =
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) minibatches of size *batchsize*.

    When *shuffle* is true the samples are drawn in a random order
    (np.random.shuffle of an index array); otherwise contiguous slices are
    used.  Any trailing partial batch is dropped.
    """
    assert len(inputs) == len(targets)
    n_samples = len(inputs)
    order = None
    if shuffle:
        order = np.arange(n_samples)
        np.random.shuffle(order)
    for start in range(0, n_samples - batchsize + 1, batchsize):
        # Index array when shuffling, plain slice otherwise -- both forms
        # fancy-index numpy arrays identically.
        if order is None:
            sel = slice(start, start + batchsize)
        else:
            sel = order[start:start + batchsize]
        yield inputs[sel], targets[sel]
# Load the training and test CSVs once up front.
data = fileio.CSVReader("train.csv", "test.csv")
data.Read()
batchsize = 500
# 477 input features, 6 output classes, two hidden layers with 50% dropout.
mlp = perceptron.SimpleMLP(batchsize, 477, 0.2, [477+6, 477+6], [0.5, 0.5],
lasagne.nonlinearities.rectify, 6)
print(mlp)
mlp.BuildNetwork()
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
num_epochs=3
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(data.train_X, data.train_Y, batchsize, shuffle=False):
inputs, targets = batch
train_err += mlp.train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(data.test_X, data.test_Y, batchsize, shuffle=False):
inputs, targets = batch
err, acc = mlp.val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
# NOTE(review): this "test" pass reuses the same data as the validation
# pass above, so it is not an independent hold-out estimate -- confirm.
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(data.test_X, data.test_Y, batchsize, shuffle=False):
inputs, targets = batch
err, acc = mlp.val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Predict the first 500 (batchsize)
y_pred = mlp.Predict(data.test_X[0:batchsize])
print("OOS Confusion Matrix (first %d):" % batchsize)
print(confusion_matrix(data.test_Y[0:batchsize], y_pred))
# Drop to a console for further study.
import code
code.interact(local=locals()) | mit |
crichardson17/starburst_atlas | HighResSims/Old/Baseline_Dusty_supersolar_5solar_cutat17/Baseline_plotter.py | 1 | 12649 | ############################################################
############# Plotting File for Contour Plots ##############
################## Data read from Cloudy ###################
################ Helen Meskhidze, Fall 2015 ################
#################### Elon University #######################
#------------------------------------------------------------------------------------------------------
'''
The inputs this code takes are .grd and .txt files from Cloudy.
It can take in as many input files (in case you have a grid and haven't concatenated all the files)- just change the numFiles value
This code outputs a set of contour plots, saved to the working directory
'''
#------------------------------------------------------------------------------------------------------
#Packages importing
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
import time
# ------------------------------------------------------------------------------------------------------
# keep track of how long the code takes to run
t0 = time.clock()
headerloc = "/Users/helen/Documents/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 8 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("padova_cont_5_{:d}_supersolar_highres.2.grd".format(i+1)):
gridFiles[i] = file
print file
if file.endswith("padova_cont_5_{:d}_supersolar_highres.2emissionlines_abs.txt".format(i+1)):
emissionFiles[i] = file
print file
print ("Files names constructed")
# ------------------------------------------------------------------------------------------------------
#Patches data
#this section adds the rectangles on the plots of the three other studies
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.)] # ignored
codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY]
path = Path(verts, codes)
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.)] # ignored
path = Path(verts, codes)
path2 = Path(verts2, codes)
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.)] # ignored
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the patches routine: to add patches for others peoples' data onto our plots.
#Adds patches to the first subplot
def add_patches(ax):
    """Overlay literature-comparison regions on the given axes.

    Draws the Moy et al. (yellow), Kewley 01 (blue) and Kewley & Levesque
    (grey) boxes from the module-level Path objects, largest first so the
    smaller boxes remain visible on top.
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='blue', lw=0)
    patch = patches.PathPatch(path, facecolor='grey', lw=0)
    # BUG FIX: the patches were previously added to the global ax1 instead of
    # the axes passed in, silently ignoring the parameter.  The existing
    # caller passes ax1, so behavior there is unchanged.
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
plt.figure(figsize=(12,10))
def add_sub_plot(sub_num, elinesplot):
"""Draw one emission-line contour panel into the 4x4 subplot grid.

sub_num selects the panel position (and the emission line within the
group); elinesplot selects which group of 16 lines (page of z_total) is
being plotted.  Relies on module-level globals: z_total, x_axis, y_axis,
extent, levels, max_values, headers and line.
"""
numplots = 16
plt.subplot(numplots/4.,4,sub_num) #row, column
#choose which z array, then which subplot
z_subnum = z_total[elinesplot]
z_line = z_subnum[:,:,sub_num-1]
# Black contour lines over a red-shaded intensity map, plus a star at the
# grid point where this line peaks (from max_values).
contour1 = plt.contour(x_axis, y_axis, z_line, levels, colors='k', origin='lower', extent=extent) #teal contours, dashed
contourmap = plt.imshow(z_line, cmap='Reds', extent= extent, aspect = "auto",origin='lower', vmin=0, vmax =4)
plt.scatter(max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[elinesplot][sub_num-1]], xy=(8,11), xytext=(6.5,8.5), fontsize = 10)
plt.annotate(max_values[line[elinesplot][sub_num-1],0], xy = (max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3]),
xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10, color='k')
# Colorbars only on the right-hand column panels (4, 8, 12) -- the
# sub_num == 0 case can never fire since sub_num starts at 1.
if sub_num == 4:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.5,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 8:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 12:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 0:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
#if sub_num == (4,8,12,16):
#axColor = plt.axes([7,7.5,0,0.5])
#axis limits
yt_min = 8 ; yt_max = 17; xt_min = 0; xt_max = 10
plt.ylim(yt_min,yt_max); plt.xlim(xt_min,xt_max)
#ticks
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
#axes labels
# Per-panel tick/label bookkeeping: labels only on the outer edges of the
# 4x4 grid so interior panels stay clean.
if sub_num == 0:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 12:
plt.tick_params(labelbottom = 'off')
if sub_num%(numplots/4) == 1:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
else:
plt.tick_params(labelleft = 'off')
if sub_num > 12:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
#else:
# plt.tick_params(labelbottom = 'off')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
#to print progress to the terminal
if sub_num == numplots/2:
print("half the sub-plots of plot{:d} are complete".format(elinesplot+1))
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
# ---------------------------------------------------
#To fix when hdens > 10
#many of my grids were run off with hdens up to 12 so we needed to cut off part of the data
#first create temorary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 10.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
#there are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#select the scaling factor
#for 4860
incidentnum = 58 #reference index of 4860
incidentline = 4860. #wavelength
incident = Emissionlines[:,58]
print("Scaling data")
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10) > 0:
concatenated_data[i,j] = math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10)
else:
concatenated_data[i,j] == 0
print("Finding peaks")
#find the maxima (having cut the arrays already) to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("Data arranged")
# ---------------------------------------------------
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines to plot here! indexes of desired lines
line = [
#UV1Lines
[0, 1, 2, 3, 5, 165, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
#977, 991, 1026, 1216, 1218, 1239, 1240, 1243, 1263, 1304, 1308, 1397, 1402, 1406, 1486, 1531
#UV2line
[16, 17, 18, 19, 20, 21, 23, 24, 25, 27, 29, 30,31, 32, 33, 34],
#1549, 1640, 1665, 1671, 1750, 1860, 1888, 1907, 2297, 2321, 2471, 2326, 2335, 2665, 2798
#Optical Lines
[36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52],
#NE 3 3343A, NE 5 3426, 3646, 3726, 3727, 3729, 3869, 3889, 3933, 4026, 4070, 4074, 4078, 4102, 4340, 4363
#Optical Lines 2
[53, 55, 56, 57, 59, 60, 61, 64, 65, 66, 67, 68, 69, 70, 71, 73],
#NE 4 4720A, AR 4 4740, 4861, O III 4959, O 3 5007, O 1 5577, N 2 5755, HE 1 5876, O 1 6300;
#S 3 6312, O 1 6363, H 1 6563, N 2 6584, S II 6716, S 2 6720, S II 6731
#IR Lines
[75, 76, 77, 78, 79, 80, 81, 82, 84, 83, 85, 86, 87, 88, 89, 90],
#AR 5 7005A, AR 3 7135A, TOTL 7325A, AR 3 7751, 6LEV 8446, CA2X 8498, CA2Y 8542, CA2Z 8662;
#CA 2 8579A, S 3 9069, H 1 9229, S 3 9532... H 1 9546
#More Lines
[97,112, 107, 110, 108, 111, 106, 109, 104, 101, 102, 105, 99, 103, 98, 100],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
]
# ---------------------------------------------------
Nx = len(np.where(y == y[0])[0])
Ny = len(np.where(x == x[0])[0])
x_axis = x[0:Nx]
y_axis = np.unique(y)
extent = [min(x_axis),max(x_axis),min(y_axis),max(y_axis)]
# ---------------------------------------------------
z_total = [None] * (len(line)-1)
#create z array for this plot
for i in range(len(z_total)):
zi1 = [concatenated_data[:,line[i]]]
zi2 = np.reshape(zi1,(Ny,Nx,16))
z_total[i] = zi2
# ---------------------------------------------------
#plotting features (and contour levels)
#remove space between plots
#levels = arange(10**-1,10, .2) #teal levels
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-2,10**2, 1) #black levels
# ---------------------------------------------------
#loop through desired plots and desired subplots
print("Beginning plotting")
plt.clf()
for j in range (len(z_total)):
for i in range(16):
add_sub_plot(i,j)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
#plt.show()
plt.savefig(("Full_lines_%d.pdf")%j)
print("plot {:d} complete".format(j+1))
plt.clf()
if (time.clock() - t0) > 120:
print(time.clock() - t0)/60., "minutes process time"
else:
print(time.clock() - t0, "seconds process time")
| gpl-2.0 |
aetilley/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
# The toy data is linearly separable, so the training labels are recovered.
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
# predict_log_proba must equal log(predict_proba) to 8 decimals.
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """Check that partial_fit matches fit for *cls* (one or two steps).

    Fits a tiny 2-sample problem three ways -- plain fit, a single
    partial_fit, and two successive partial_fit calls -- and asserts the
    learned class and feature counts agree.
    """
    X_small = [[0, 1], [1, 0]]
    y_small = [0, 1]
    reference = cls()
    reference.fit(X_small, y_small)
    one_shot = cls()
    one_shot.partial_fit(X_small, y_small, classes=[0, 1])
    assert_array_equal(reference.class_count_, one_shot.class_count_)
    assert_array_equal(reference.feature_count_, one_shot.feature_count_)
    incremental = cls()
    incremental.partial_fit([X_small[0]], [y_small[0]], classes=[0, 1])
    incremental.partial_fit([X_small[1]], [y_small[1]])
    assert_array_equal(reference.class_count_, incremental.class_count_)
    assert_array_equal(reference.feature_count_, incremental.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    # With weights [1, 1, 4], the single class-1 sample carries more total
    # weight (4) than the two class-0 samples combined (2), which must be
    # reflected in both the prediction and the fitted class prior.
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([1, 0]), [1])
    # In the binary case intercept_[0] holds the log-prior of the positive
    # class; the weighted priors are 2/6 and 4/6.
    positive_prior = np.exp(clf.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    # coef_ and intercept_ should have shapes as in other linear models.
    # Non-regression test for issue #2127.
    X = [[1, 0, 0], [1, 1, 1]]
    y = [1, 2]  # binary classification
    for clf in [MultinomialNB(), BernoulliNB()]:
        clf.fit(X, y)
        # A binary problem exposes a single row of coefficients and a
        # single intercept, mirroring other linear classifiers.
        assert_equal(clf.coef_.shape, (1, 3))
        assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # Also build a binary sub-problem restricted to the digits 3 and 8.
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)
    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)
    # Bernoulli NB
    # `X > 4` binarizes the pixel intensities, as BernoulliNB expects
    # boolean-like features.
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)
    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)
    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)
    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
    # Test for issue #4268.
    # Tests that the feature log prob value computed by BernoulliNB when
    # alpha=1.0 is equal to the expression given in Manning, Raghavan,
    # and Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])
    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    # Laplace smoothing with alpha=1: +1 on the counts, +2 on the totals.
    num = np.log(clf.feature_count_ + 1.0)
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
    # Check manual estimate matches
    assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)
    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    # (binary presence indicators, one column per word).
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])
    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])
    # Fit BernoulliBN w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Check the class prior is correct (3 of 4 documents are China)
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([0, 1, 1, 0, 0, 1])
    # Check the predictive probabilities are correct; the unnormalized
    # values are the book's worked numbers before normalization.
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 93 | 3460 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Draw one random multilabel dataset on ``ax``.

    Samples are coloured by their label combination; a star marks the
    expected point of each single class, sized by its prior probability.
    Returns the class priors ``p_c`` and the per-class feature
    distributions ``p_w_c``.
    """
    X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
                                   n_classes=n_classes, n_labels=n_labels,
                                   length=length, allow_unlabeled=False,
                                   return_indicator=True,
                                   return_distributions=True,
                                   random_state=RANDOM_SEED)
    # Encode each row of the label indicator matrix as a bitmask (1, 2, 4)
    # so that every label combination selects a distinct entry of COLORS.
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
                                                    ).sum(axis=1)),
               marker='.')
    # Stars at the per-class expected counts, area scaled by prior^2.
    ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
               marker='*', linewidth=.5, edgecolor='black',
               s=20 + 1500 * p_c ** 2,
               color=COLORS.take([1, 2, 4]))
    ax.set_xlabel('Feature 0 count')
    return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
Tjorriemorrie/trading | 09_scalping/features.py | 2 | 11161 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
import sklearn as sk
import operator
from pprint import pprint
class FeatureFactory():
    """Computes technical-analysis indicators (EMA, RSI, Ichimoku) and
    assembles them into a flat feature matrix for model training.

    All inputs are parallel per-tick series (oldest first); getFeatures()
    returns one row per tick whose column order matches getNames().
    """
    def ema(self, s, n):
        """ returns an n period exponential moving average for the time series s
        s is a list ordered from oldest (index 0) to most recent (index -1)
        n is an integer
        returns a numeric array of the exponential moving average """
        s = np.array(s).astype(float)
        ema = []
        j = 1
        # get n sma first and calculate the next n period ema
        sma = sum(s[:n]) / n
        multiplier = 2 / float(1 + n)
        # seed the first n outputs with the simple moving average
        ema[:0] = [sma] * n
        # EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)
        ema.append(( (s[n] - sma) * multiplier) + sma)
        # now calculate the rest of the values
        # NOTE(review): ema[j] starts at index 1, inside the SMA-seeded
        # prefix, rather than at the most recently appended EMA value --
        # confirm this matches the intended recurrence.
        for i in s[n + 1:]:
            tmp = ( (i - ema[j]) * multiplier) + ema[j]
            ema.append(tmp)
            j = j + 1
        # print "ema length = " + str(len(ema))
        return ema
    def rsi(self, closes, n):
        """
        RSI = 100 - 100/(1 + RS*)
        *Where RS = Average of x days' up closes / Average of x days' down closes.
        """
        # print '\ncloses'
        # print len(closes)
        delta = np.diff(closes)
        # Split the one-step differences into gains (dUp) and losses (dDown).
        dUp, dDown = delta.copy(), delta.copy()
        dUp[dUp < 0] = 0
        dDown[dDown > 0] = 0
        # NOTE(review): pd.rolling_mean was removed in pandas >= 0.18; the
        # modern spelling is pd.Series(x).rolling(n).mean().
        RolUp = pd.rolling_mean(dUp, n)
        RolDown = np.absolute(pd.rolling_mean(dDown, n))
        RS = RolUp / RolDown
        # Zero the warm-up window, then prepend one entry so the result
        # aligns with the original closes series (diff drops one element).
        RS[0:n-1] = 0
        RS = np.insert(RS, 0, 0)
        # print '\nRS'
        # print len(RS)
        # print RS[0:20]
        rsiCalc = lambda x: 100 - 100 / (1 + x)
        rsi = [rsiCalc(rs) for rs in RS]
        # print '\nrsi'
        # print len(rsi)
        # print np.array(rsi).astype(int)
        return rsi
    def extractChiMoku(self, highs, lows, closes):
        """Compute the five Ichimoku Kinko Hyo lines from high/low/close
        series; returns lists aligned with the input length."""
        tenkanSen = []
        kijunSen = []
        senkouSpanB = []
        for i in xrange(len(highs)):
            # avg of highest high and lowest low over past 9 ticks
            tenkanSenHigh = max(highs[max(0, i-9):i+1])
            tenkanSenLow = min(lows[max(0, i-9):i+1])
            tenkanSen.append((tenkanSenHigh + tenkanSenLow) / 2)
            # avg of highest high and lowest low over past 26 ticks
            kijunSenHigh = max(highs[max(0, i-26):i+1])
            kijunSenLow = min(lows[max(0, i-26):i+1])
            kijunSen.append((kijunSenHigh + kijunSenLow) / 2)
            # (Highest high + Lowest low) / 2 over the last 52 trading days plotted 26 days ahead.
            senkouSpanBHigh = max(highs[max(0, i-52):i+1])
            senkouSpanBLow = min(lows[max(0, i-52):i+1])
            senkouSpanB.append((senkouSpanBHigh + senkouSpanBLow) / 2)
        # (Tenkan Sen + Kijun Sen) / 2 plotted 26 days ahead.
        # NOTE(review): the pad length here is 256 while the Ichimoku
        # forward shift is conventionally 26 -- confirm it is intentional.
        senkouSpanA = [(tenkanSen[0] + kijunSen[0]) / 2] * 256
        senkouSpanA.extend([(t + k) / 2 for t, k in zip(tenkanSen, kijunSen)])
        senkouSpanA = senkouSpanA[:len(highs)]
        # The closing price plotted 26 trading days behind.
        chikouSpan = [closes[0]] * 26
        chikouSpan.extend(closes)
        chikouSpan = chikouSpan[:len(highs)]
        # pprint(tenkanSen[-5:])
        # pprint(kijunSen[-5:])
        # pprint(senkouSpanA)
        return tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
    def getNames(self):
        # Human-readable labels for every column produced by getFeatures();
        # the ordering here must stay in sync with the feature list there.
        names = [
            'close/ema89', 'close/ema55', 'close/ema34', 'close/ema21', 'close/ema13', 'close/ema08',
            'ema08/ema89', 'ema08/ema55', 'ema08/ema34', 'ema08/ema21', 'ema08/ema13',
            'ema13/ema89', 'ema13/ema55', 'ema13/ema34', 'ema13/ema21',
            'ema21/ema89', 'ema21/ema55', 'ema21/ema34',
            'ema34/ema89', 'ema34/ema55',
            'ema55/ema89',
            # 'volume/ema20v', 'volume/ema8v', 'volume/ema5v',
            # 'ema5v/ema20v', 'ema5v/ema8v',
            # 'ema8v/ema20v',
            'topShadow/topShadowsMean',
            'botShadow/botShadowsMean',
            # RSI
            'close > rsi21', 'close > rsi34', 'close > rsi55', 'rsi21 > rsi34', 'rsi21 > rsi55', 'rsi34 > rsi55',
            'close < rsi21', 'close < rsi34', 'close < rsi55', 'rsi21 < rsi34', 'rsi21 < rsi55', 'rsi34 < rsi55',
            # chimoku
            'tenkanKijunBullishWeak', 'tenkanKijunBullishNeutral', 'tenkanKijunBullishStrong',
            'tenkanKijunBearishWeak', 'tenkanKijunBearishNeutral', 'tenkanKijunBearishStrong',
            'kijunPriceBullishWeak', 'kijunPriceBullishNeutral', 'kijunPriceBullishStrong',
            'kijunPriceBearishWeak', 'kijunPriceBearishNeutral', 'kijunPriceBearishStrong',
            'kumoBullish', 'kumoBearish',
            'senkouSpanBullishWeak', 'senkouSpanBullishNeutral', 'senkouSpanBullishStrong',
            'senkouSpanBearishWeak', 'senkouSpanBearishNeutral', 'senkouSpanBearishStrong',
        ]
        return names
    def getFeatures(self, opens, highs, lows, closes, volumes):
        """Build one feature row per tick from parallel OHLCV series.

        Returns a list of lists; column order matches getNames().
        Note: ``volumes`` is currently unused -- all volume-based
        features below are commented out.
        """
        ema08s = self.ema(closes, 8)
        ema13s = self.ema(closes, 13)
        ema21s = self.ema(closes, 21)
        ema34s = self.ema(closes, 34)
        ema55s = self.ema(closes, 55)
        ema89s = self.ema(closes, 89)
        # ema5vs = self.ema(volumes, 5)
        # ema8vs = self.ema(volumes, 8)
        # ema20vs = self.ema(volumes, 20)
        # Candlestick shadows: wick above the body and wick below the body.
        topShadows = [high - max(open, close) for open, high, close in zip(opens, highs, closes)]
        topShadowsMean = np.mean(topShadows)
        botShadows = [min(open, close) - low for open, low, close in zip(opens, lows, closes)]
        botShadowsMean = np.mean(botShadows)
        rsi21s = self.rsi(closes, 21)
        rsi34s = self.rsi(closes, 34)
        rsi55s = self.rsi(closes, 55)
        tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan = self.extractChiMoku(highs, lows, closes)
        data = [
            [
                # EMA
                close / ema89, close / ema55, close / ema34, close / ema21, close / ema13, close / ema08,
                ema08 / ema89, ema08 / ema55, ema08 / ema34, ema08 / ema21, ema08 / ema13,
                ema13 / ema89, ema13 / ema55, ema13 / ema34, ema13 / ema21,
                ema21 / ema89, ema21 / ema55, ema21 / ema34,
                ema34 / ema89, ema34 / ema55,
                ema55 / ema89,
                # volume / ema20v, volume / ema8v, volume / ema5v,
                # ema5v / ema20v, ema5v / ema8v,
                # ema8v / ema20v,
                topShadow / topShadowsMean,
                botShadow / botShadowsMean,
                # RSI
                # bullish
                1 if close > rsi21 else 0,
                1 if close > rsi34 else 0,
                1 if close > rsi55 else 0,
                1 if rsi21 > rsi34 else 0,
                1 if rsi21 > rsi55 else 0,
                1 if rsi34 > rsi55 else 0,
                # bearish
                1 if close < rsi21 else 0,
                1 if close < rsi34 else 0,
                1 if close < rsi55 else 0,
                1 if rsi21 < rsi34 else 0,
                1 if rsi21 < rsi55 else 0,
                1 if rsi34 < rsi55 else 0,
                # TENKAN & KIJUN
                # weak bullish
                1 if tenkanSen > kijunSen and kijunSen < senkouSpanA else 0,
                # neutral bullish
                1 if tenkanSen > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
                # strong bullish
                1 if tenkanSen > kijunSen and kijunSen > senkouSpanA else 0,
                # weak bearish
                1 if tenkanSen < kijunSen and kijunSen > senkouSpanA else 0,
                # neutral bearish
                1 if tenkanSen < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
                # strong bearish
                1 if tenkanSen < kijunSen and kijunSen < senkouSpanA else 0,
                # KIJUN & PRICE
                # weak bullish
                1 if close > kijunSen and kijunSen < senkouSpanA else 0,
                # neutral bullish
                1 if close > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
                # strong bullish
                1 if close > kijunSen and kijunSen > senkouSpanA else 0,
                # weak bearish
                1 if close < kijunSen and kijunSen > senkouSpanA else 0,
                # neutral bearish
                1 if close < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
                # strong bearish
                1 if close < kijunSen and kijunSen < senkouSpanA else 0,
                # KUMO BREAKOUT
                # bullish
                1 if close > senkouSpanA else 0,
                # bearish
                1 if close < senkouSpanA else 0,
                # SENKOU SPAN
                # weak bullish
                1 if senkouSpanA > senkouSpanB and close < senkouSpanA else 0,
                # neutral bullish
                1 if senkouSpanA > senkouSpanB and senkouSpanA > close > senkouSpanB else 0,
                # strong bullish
                1 if senkouSpanA > senkouSpanB and close > senkouSpanA else 0,
                # weak bearish
                1 if senkouSpanA < senkouSpanB and close > senkouSpanA else 0,
                # neutral bearish
                1 if senkouSpanA < senkouSpanB and senkouSpanA < close < senkouSpanB else 0,
                # strong bearish
                1 if senkouSpanA < senkouSpanB and close < senkouSpanA else 0,
            ]
            for close,
                ema08, ema13, ema21, ema34, ema55, ema89,
                # volume, ema5v, ema8v, ema20v,
                topShadow, botShadow,
                rsi21, rsi34, rsi55,
                tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
            in zip(closes,
                   ema08s, ema13s, ema21s, ema34s, ema55s, ema89s,
                   # volumes, ema5vs, ema8vs, ema20vs,
                   topShadows, botShadows,
                   rsi21s, rsi34s, rsi55s,
                   tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
                   )
        ]
        # print data
        return data
def calculateTargets(df):
    """Append 'targetBull' and 'targetBear' columns to ``df`` in place.

    For every row, look ahead over the next ``sampleSize`` rows (including
    the current one) and record:
      * targetBull: highest future 'high' minus the current 'close'
      * targetBear: lowest future 'low' minus the current 'close'

    Assumes ``df`` has a default integer RangeIndex and the columns
    'close', 'high' and 'low'.
    """
    targetsBull = []
    targetsBear = []
    sampleSize = 5 * 2  # look-ahead window of 10 rows
    for pos, d in df.iterrows():
        close = d['close']
        dfSample = df.iloc[pos:pos + sampleSize]
        # Highest high within the window sets the bullish target.
        bullHighestHighIndex = dfSample['high'].idxmax()
        # .loc replaces the DataFrame.ix accessor removed in pandas >= 1.0.
        bullHighestHigh = dfSample.loc[bullHighestHighIndex, 'high']
        targetsBull.append(bullHighestHigh - close)
        # Lowest low within the window sets the bearish target.
        bearLowestLowIndex = dfSample['low'].idxmin()
        bearLowestLow = dfSample.loc[bearLowestLowIndex, 'low']
        targetsBear.append(bearLowestLow - close)
    df['targetBull'] = targetsBull
    df['targetBear'] = targetsBear
adammenges/statsmodels | statsmodels/datasets/elnino/data.py | 25 | 1779 | """El Nino dataset, 1950 - 2010"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is in the public domain."""
TITLE = """El Nino - Sea Surface Temperatures"""
SOURCE = """
National Oceanic and Atmospheric Administration's National Weather Service
ERSST.V3B dataset, Nino 1+2
http://www.cpc.ncep.noaa.gov/data/indices/
"""
DESCRSHORT = """Averaged monthly sea surface temperature - Pacific Ocean."""
DESCRLONG = """This data contains the averaged monthly sea surface
temperature in degrees Celcius of the Pacific Ocean, between 0-10 degrees South
and 90-80 degrees West, from 1950 to 2010. This dataset was obtained from
NOAA.
"""
NOTE = """::
Number of Observations - 61 x 12
Number of Variables - 1
Variable name definitions::
TEMPERATURE - average sea surface temperature in degrees Celcius
(12 columns, one per month).
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
    """
    Load the El Nino data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.

    Notes
    -----
    The elnino Dataset instance does not contain endog and exog attributes.
    """
    data = _get_data()
    # Column names come straight from the record array's dtype.
    names = data.dtype.names
    dataset = Dataset(data=data, names=names)
    return dataset
def load_pandas():
    """Load the El Nino data with ``data`` converted to a pandas DataFrame."""
    dataset = load()
    # Replace the record array with a DataFrame for pandas users.
    dataset.data = DataFrame(dataset.data)
    return dataset
def _get_data():
    """Read elnino.csv (located next to this module) into a record array."""
    filepath = dirname(abspath(__file__))
    # Use a context manager so the file handle is closed even if parsing
    # fails (the original left the handle open).
    with open(filepath + '/elnino.csv', 'rb') as f:
        data = recfromtxt(f, delimiter=",", names=True, dtype=float)
    return data
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/pylab_examples/fancyarrow_demo.py | 12 | 1386 | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
styles = mpatches.ArrowStyle.get_styles()
ncol=2
nrow = (len(styles)+1) // ncol
figheight = (nrow+0.5)
fig1 = plt.figure(1, (4.*ncol/1.5, figheight/1.5))
fontsize = 0.2 * 70
ax = fig1.add_axes([0, 0, 1, 1], frameon=False, aspect=1.)
ax.set_xlim(0, 4*ncol)
ax.set_ylim(0, figheight)
def to_texstring(s):
    """Wrap <, > and | in inline math so TeX renders them literally."""
    replacements = (("<", r"$<$"), (">", r"$>$"), ("|", r"$|$"))
    for plain, tex in replacements:
        s = s.replace(plain, tex)
    return s
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
x = 3.2 + (i//nrow)*4
y = (figheight - 0.7 - i%nrow) # /figheight
p = mpatches.Circle((x, y), 0.2, fc="w")
ax.add_patch(p)
ax.annotate(to_texstring(stylename), (x, y),
(x-1.2, y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="w", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.draw()
plt.show()
| mit |
jasonabele/gnuradio | gr-msdd6000/src/python-examples/msdd_spectrum_sense.py | 8 | 10553 | #!/usr/bin/env python
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, eng_notation, optfir, window
from gnuradio import msdd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
import struct
from pylab import *
from numpy import array
import time
matplotlib.interactive(True)
matplotlib.use('TkAgg')
class tune(gr.feval_dd):
    """
    This class allows C++ code to callback into python.

    An instance is handed to gr.bin_statistics_f, which calls eval()
    from C++ whenever it is ready to move to the next center frequency.
    """
    def __init__(self, tb):
        gr.feval_dd.__init__(self)
        self.tb = tb  # the top block that owns the scan schedule
    def eval(self, ignore):
        """
        This method is called from gr.bin_statistics_f when it wants to change
        the center frequency. This method tunes the front end to the new center
        frequency, and returns the new frequency as its result.
        """
        try:
            # We use this try block so that if something goes wrong from here
            # down, at least we'll have a prayer of knowing what went wrong.
            # Without this, you get a very mysterious:
            #
            # terminate called after throwing an instance of 'Swig::DirectorMethodException'
            # Aborted
            #
            # message on stderr. Not exactly helpful ;)
            new_freq = self.tb.set_next_freq()
            return new_freq
        except Exception, e:
            # NOTE(review): swallowing the exception makes eval() return
            # None to the C++ caller -- confirm that is handled upstream.
            print "tune: Exception: ", e
class parse_msg(object):
    """Consumes bin_statistics_f messages and renders a live spectrum plot.

    Each message carries one FFT frame of magnitudes at a given center
    frequency.  Once the sweep wraps around (center frequency decreases),
    the accumulated spectrum is drawn, and subsequent sweeps are blended
    into the stored data with an exponential moving average controlled
    by ``alpha``.
    """
    def __init__(self, sample_rate, percent, alpha=0.01):
        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22
        # Set up the matplotlib figure and a placeholder line to update.
        self.fig = figure(1, facecolor="w", figsize=(12,9))
        self.sp = self.fig.add_subplot(1,1,1)
        self.pl = self.sp.plot(range(100), 100*[1,])
        params = {'backend': 'ps',
                  'xtick.labelsize': self.axis_font_size,
                  'ytick.labelsize': self.axis_font_size,
                  'text.usetex': False}
        rcParams.update(params)
        self.sp.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
        self.sp.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
        self.sp.set_ylabel("Magnitude (dB)", fontsize=self.label_font_size, fontweight="bold")
        self.text_alpha = figtext(0.10, 0.94, ('Moving average alpha: %s' % alpha), weight="heavy", size=self.text_size)
        self.cfreqs = list()
        self.freqrange = list()
        self.data = list() #array('f')
        self.alpha = alpha  # EMA weight for blending successive sweeps
        self.index = 0
        self.full = False  # becomes True after the first complete sweep
        self.last_cfreq = 0
        self.sample_rate = sample_rate
        # Fraction of FFT bins to trim from EACH edge of the band.
        self.percent = (1.0-percent)/2.0
    def parse(self, msg):
        """Unpack one FFT message and fold it into the running spectrum."""
        self.center_freq = msg.arg1()
        self.vlen = int(msg.arg2())
        assert(msg.length() == self.vlen * gr.sizeof_float)
        # A drop in center frequency means the sweep wrapped around:
        # draw the accumulated spectrum and start collecting frequencies
        # for the next pass.
        if(self.center_freq < self.last_cfreq):
            print "Plotting spectrum\n"
            self.full = True
            self.pl[0].set_data([self.freqrange, self.data])
            self.sp.set_ylim([min(self.data), max(self.data)])
            self.sp.set_xlim([min(self.freqrange), max(self.freqrange)])
            draw()
            self.index = 0
            del self.freqrange
            self.freqrange = list()
            #raw_input()
        self.last_cfreq = self.center_freq
        # Keep only the central bins; the band edges are discarded.
        startind = int(self.percent * self.vlen)
        endind = int((1.0 - self.percent) * self.vlen)
        fstep = self.sample_rate / self.vlen
        f = [self.center_freq - self.sample_rate/2.0 + i*fstep for i in range(startind, endind)]
        self.freqrange += f
        t = msg.to_string()
        d = struct.unpack('%df' % (self.vlen,), t)
        if self.full:
            # Blend this sweep into the stored spectrum (EMA, weight alpha).
            for i in range(startind, endind):
                self.data[self.index] = (1.0-self.alpha)*self.data[self.index] + (self.alpha)*d[i]
                self.index += 1
        else:
            # First sweep: just accumulate the raw magnitudes.
            self.data += [di for di in d[startind:endind]]
class my_top_block(gr.top_block):
    """Flowgraph that sweeps an MSDD receiver across a frequency range.

    Parses command-line options, connects the source -> FFT -> magnitude
    -> log -> bin_statistics_f chain, and steps the center frequency via
    the ``tune`` callback.
    """
    def __init__(self):
        gr.top_block.__init__(self)
        usage = "usage: %prog [options] host min_freq max_freq"
        parser = OptionParser(option_class=eng_option, usage=usage)
        parser.add_option("-g", "--gain", type="eng_float", default=None,
                          help="set gain in dB (default is midpoint)")
        parser.add_option("", "--tune-delay", type="eng_float", default=5e-5, metavar="SECS",
                          help="time to delay (in seconds) after changing frequency [default=%default]")
        parser.add_option("", "--dwell-delay", type="eng_float", default=50e-5, metavar="SECS",
                          help="time to dwell (in seconds) at a given frequncy [default=%default]")
        parser.add_option("-F", "--fft-size", type="int", default=256,
                          help="specify number of FFT bins [default=%default]")
        parser.add_option("-d", "--decim", type="intx", default=16,
                          help="set decimation to DECIM [default=%default]")
        parser.add_option("", "--real-time", action="store_true", default=False,
                          help="Attempt to enable real-time scheduling")
        (options, args) = parser.parse_args()
        if len(args) != 3:
            parser.print_help()
            sys.exit(1)
        self.address = args[0]
        self.min_freq = eng_notation.str_to_num(args[1])
        self.max_freq = eng_notation.str_to_num(args[2])
        self.decim = options.decim
        self.gain = options.gain
        if self.min_freq > self.max_freq:
            self.min_freq, self.max_freq = self.max_freq, self.min_freq # swap them
        self.fft_size = options.fft_size
        if not options.real_time:
            realtime = False
        else:
            # Attempt to enable realtime scheduling
            r = gr.enable_realtime_scheduling()
            if r == gr.RT_OK:
                realtime = True
            else:
                realtime = False
                print "Note: failed to enable realtime scheduling"
        # NOTE(review): `realtime` is computed but never used afterwards.
        adc_rate = 102.4e6
        self.int_rate = adc_rate / self.decim
        print "Sampling rate: ", self.int_rate
        # build graph
        self.port = 10001
        self.src = msdd.source_simple(self.address, self.port)
        self.src.set_decim_rate(self.decim)
        self.set_gain(self.gain)
        self.set_freq(self.min_freq)
        s2v = gr.stream_to_vector(gr.sizeof_gr_complex, self.fft_size)
        mywindow = window.blackmanharris(self.fft_size)
        fft = gr.fft_vcc(self.fft_size, True, mywindow, True)
        # Window power, used below to normalize the log-magnitude output.
        power = 0
        for tap in mywindow:
            power += tap*tap
        # NOTE(review): `norm` is created but never connect()ed below.
        norm = gr.multiply_const_cc(1.0/self.fft_size)
        c2mag = gr.complex_to_mag_squared(self.fft_size)
        # FIXME the log10 primitive is dog slow
        log = gr.nlog10_ff(10, self.fft_size,
                           -20*math.log10(self.fft_size)-10*math.log10(power/self.fft_size))
        # Set the freq_step to % of the actual data throughput.
        # This allows us to discard the bins on both ends of the spectrum.
        self.percent = 0.4
        self.freq_step = self.percent * self.int_rate
        self.min_center_freq = self.min_freq + self.freq_step/2
        nsteps = math.ceil((self.max_freq - self.min_freq) / self.freq_step)
        self.max_center_freq = self.min_center_freq + (nsteps * self.freq_step)
        self.next_freq = self.min_center_freq
        tune_delay = max(0, int(round(options.tune_delay * self.int_rate / self.fft_size))) # in fft_frames
        dwell_delay = max(1, int(round(options.dwell_delay * self.int_rate / self.fft_size))) # in fft_frames
        self.msgq = gr.msg_queue(16)
        self._tune_callback = tune(self) # hang on to this to keep it from being GC'd
        stats = gr.bin_statistics_f(self.fft_size, self.msgq,
                                    self._tune_callback, tune_delay, dwell_delay)
        # FIXME leave out the log10 until we speed it up
        self.connect(self.src, s2v, fft, c2mag, log, stats)
    def set_next_freq(self):
        """Advance the scan by one freq_step, wrapping at the top of the
        range, and tune the front end to the step being handed out."""
        target_freq = self.next_freq
        self.next_freq = self.next_freq + self.freq_step
        if self.next_freq >= self.max_center_freq:
            self.next_freq = self.min_center_freq
        if not self.set_freq(target_freq):
            print "Failed to set frequency to", target_freq
        return target_freq
    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.
        @param target_freq: frequency in Hz
        @rtype: bool
        """
        return self.src.set_rx_freq(0, target_freq)
    def set_gain(self, gain):
        """Set the programmable gain amplifier on channel 0."""
        self.src.set_pga(0, gain)
def main_loop(tb):
    """Blocking consumer loop: pull FFT messages off tb.msgq and plot them."""
    msgparser = parse_msg(tb.int_rate, tb.percent)
    while 1:
        # Get the next message sent from the C++ code (blocking call).
        # It contains the center frequency and the mag squared of the fft
        msgparser.parse(tb.msgq.delete_head())
        # Print center freq so we know that something is happening...
        print msgparser.center_freq
        # FIXME do something useful with the data...
        # m.data are the mag_squared of the fft output (they are in the
        # standard order.  I.e., bin 0 == DC.)
        # You'll probably want to do the equivalent of "fftshift" on them
        # m.raw_data is a string that contains the binary floats.
        # You could write this as binary to a file.
if __name__ == '__main__':
tb = my_top_block()
try:
tb.start() # start executing flow graph in another thread...
main_loop(tb)
except KeyboardInterrupt:
pass
| gpl-3.0 |
raghavrv/scikit-learn | sklearn/datasets/samples_generator.py | 8 | 56767 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    # For more than 30 dimensions, draw the extra dimensions uniformly at
    # random and recurse on the last 30; distinctness is guaranteed by the
    # recursive 30-dimensional block sampled without replacement.
    if dimensions > 30:
        return np.hstack([rng.randint(2, size=(samples, dimensions - 30)),
                          _generate_hypercube(samples, 30, rng)])
    # Sample distinct integers in [0, 2**dimensions), then unpack each
    # big-endian uint32 into its bits and keep the low `dimensions` bits.
    out = sample_without_replacement(2 ** dimensions, samples,
                                    random_state=rng).astype(dtype='>u4',
                                                             copy=False)
    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
    return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.
    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)
    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Each cluster needs its own distinct hypercube vertex in the
    # n_informative-dimensional subspace.
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")
    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class
    if weights and len(weights) == (n_classes - 1):
        # Infer the missing last class weight so the weights sum to 1.
        weights.append(1.0 - sum(weights))
    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])
    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # Hand out any rounding leftovers one sample at a time.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    # NOTE(review): np.int is the builtin int alias (deprecated in newer
    # NumPy releases).
    y = np.zeros(n_samples, dtype=np.int)
    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    # Map {0, 1} vertex coordinates onto {-class_sep, +class_sep}.
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)
    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)
    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance
        X_k += centroid  # shift the cluster to a vertex
    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)
    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]
    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)
    # Randomly replace labels
    # NOTE(review): this guard is effectively always true; with
    # flip_y <= 0 the mask below is all False (no labels change), but the
    # RNG stream is still advanced.
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift
    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)
        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.
    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features.
    n_classes : int, optional (default=5)
        The number of classes of the classification problem.
    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.
    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.
    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.
    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix
        .. versionadded:: 0.17
           parameter to allow *sparse* output.
    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.
    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.
    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    """
    generator = check_random_state(random_state)
    # Random class prior P(c), normalized to sum to 1.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    # Random word distribution per class P(w|c); columns sum to 1.
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)
    def sample_example():
        # Draw one (word indices, label list) pair.
        _, n_classes = p_w_c.shape
        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)
        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)
        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)
        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y
        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y
    # Build X incrementally in CSR form (word counts per sample).
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()
    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        # Fit on one list covering all classes so the binarizer knows
        # every possible label, even ones absent from Y.
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generate the binary classification data used in Hastie et al. 2009,
    Example 10.2.

    The ten features are i.i.d. standard Gaussians and the target is::

        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.
    y : array of shape [n_samples]
        The output values (+1.0 or -1.0).

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rng = check_random_state(random_state)
    X = rng.normal(size=(n_samples, 10))
    # Threshold the squared radius at 9.34 and map the two classes onto
    # the labels -1.0 / +1.0.
    radius_sq = (X ** 2.0).sum(axis=1)
    y = np.where(radius_sq > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random linear regression problem.

    The design matrix is either well conditioned (i.i.d. standard normal,
    the default) or low rank with a fat-tailed singular value profile
    (see :func:`make_low_rank_matrix`). The targets are a (possibly
    biased) linear combination of ``n_informative`` features, with
    optional Gaussian noise added.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of features with a non-zero regression coefficient.
    n_targets : int, optional (default=1)
        The dimension of the y output vector per sample; the output is a
        scalar by default.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        If not None, the approximate number of singular vectors required
        to explain most of the input data; if None, the input set is well
        conditioned, centered and gaussian with unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular
        values profile if `effective_rank` is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are
        returned.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only
        if coef is True.
    """
    n_informative = min(n_features, n_informative)
    rng = check_random_state(random_state)
    if effective_rank is None:
        # Well conditioned: i.i.d. standard normal input.
        X = rng.randn(n_samples, n_features)
    else:
        # Low rank input with a fat-tailed singular value profile.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=rng)
    # Only the first n_informative coefficients are non-zero, so the
    # remaining features are uncorrelated with y and can be pruned by
    # sparsity-inducing regularizers (L1, elastic net).
    coefs = np.zeros((n_features, n_targets))
    coefs[:n_informative, :] = 100 * rng.rand(n_informative, n_targets)
    y = np.dot(X, coefs) + bias
    if noise > 0.0:
        y += rng.normal(scale=noise, size=y.shape)
    if shuffle:
        # Permute the samples first, then the feature columns, keeping
        # the coefficient rows aligned with the permuted columns.
        X, y = util_shuffle(X, y, random_state=rng)
        col_order = np.arange(n_features)
        rng.shuffle(col_order)
        X[:, :] = X[:, col_order]
        coefs = coefs[col_order]
    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(coefs)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated. When ``n_samples`` is odd,
        the inner circle receives one more point than the outer circle.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")
    # Fix: split an odd n_samples between the circles instead of silently
    # returning 2 * (n_samples // 2) points. Even n_samples reproduces
    # the previous behaviour exactly.
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out
    generator = check_random_state(random_state)
    # endpoint=False avoids generating the same point at angles 0 and
    # 2*pi (equivalent to the old linspace(..., n + 1)[:-1] trick).
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor
    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling shuffling and noise.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_outer = n_samples // 2
    n_inner = n_samples - n_outer
    rng = check_random_state(random_state)
    # Upper half circle centred at the origin, and a lower half circle
    # shifted right by 1 and down by 0.5 so the two moons interleave.
    theta_out = np.linspace(0, np.pi, n_outer)
    theta_in = np.linspace(0, np.pi, n_inner)
    X = np.column_stack((
        np.concatenate([np.cos(theta_out), 1 - np.cos(theta_in)]),
        np.concatenate([np.sin(theta_out), 1 - np.sin(theta_in) - .5]),
    ))
    y = np.concatenate([np.zeros(n_outer, dtype=np.intp),
                        np.ones(n_inner, dtype=np.intp)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)
    if noise is not None:
        X += rng.normal(scale=noise, size=X.shape)
    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    See also
    --------
    make_classification: a more intricate variant
    """
    rng = check_random_state(random_state)
    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly in the box.
        centers = rng.uniform(center_box[0], center_box[1],
                              size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]
    if isinstance(cluster_std, numbers.Real):
        # Broadcast a scalar std to one value per center.
        cluster_std = np.ones(len(centers)) * cluster_std
    n_centers = centers.shape[0]
    # Split n_samples as evenly as possible; the first `extra` centers
    # absorb the remainder.
    base, extra = divmod(n_samples, n_centers)
    counts = [base + 1 if i < extra else base for i in range(n_centers)]
    blocks = []
    labels = []
    for idx, (count, std) in enumerate(zip(counts, cluster_std)):
        blocks.append(centers[idx] + rng.normal(scale=std,
                                                size=(count, n_features)))
        labels.extend([idx] * count)
    X = np.concatenate(blocks)
    y = np.array(labels)
    if shuffle:
        order = np.arange(n_samples)
        rng.shuffle(order)
        X = X[order]
        y = y[order]
    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman \#1" regression problem.

    Inputs `X` are independent features uniformly distributed on [0, 1].
    The output is::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first 5 of the `n_features` features are used to compute `y`;
    the rest are independent of it. `n_features` must be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features. Should be at least 5.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling the sampling.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)
    # Sum the four signal terms in the documented order, then add noise.
    sine_term = 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    quad_term = 20 * (X[:, 2] - 0.5) ** 2
    y = sine_term + quad_term + 10 * X[:, 3] + 5 * X[:, 4] \
        + noise * rng.randn(n_samples)
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#2" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output is::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling the sampling.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)
    # Map the uniform [0, 1) draws onto the documented intervals.
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = (X[:, 0] ** 2 + inner ** 2) ** 0.5 + noise * rng.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#3" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output is::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling the sampling.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)
    # Map the uniform [0, 1) draws onto the documented intervals.
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = np.arctan(inner / X[:, 0]) + noise * rng.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance is explained by a bell-shaped profile of width
    ``effective_rank``::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    while the remaining singular values form a fat, slowly decaying tail::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part can be seen as the structured signal of the data and
    the tail as noise that a small number of linear components cannot
    summarize — a profile commonly observed in practice (face images,
    TF-IDF vectors of web documents, ...).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most
        of the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling the sampling.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    rng = check_random_state(random_state)
    rank = min(n_samples, n_features)
    # Random orthonormal bases for the left and right singular vectors.
    u, _ = linalg.qr(rng.randn(n_samples, rank), mode='economic')
    v, _ = linalg.qr(rng.randn(n_features, rank), mode='economic')
    idx = np.arange(rank, dtype=np.float64)
    # Assemble the singular profile: bell-shaped signal plus fat tail.
    signal = ((1 - tail_strength) *
              np.exp(-1.0 * (idx / effective_rank) ** 2))
    fat_tail = tail_strength * np.exp(-0.1 * idx / effective_rank)
    s = np.identity(rank) * (signal + fat_tail)
    return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, where D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate
    n_components : int,
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or RNG controlling the sampling.

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    rng = check_random_state(random_state)
    # Dictionary of Gaussian atoms, normalized to unit columns.
    D = rng.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))
    # Sparse code: pick a random support of size n_nonzero_coefs per
    # column and fill it with Gaussian coefficients.
    X = np.zeros((n_components, n_samples))
    for col in range(n_samples):
        support = rng.permutation(n_components)[:n_nonzero_coefs]
        X[support, col] = rng.randn(n_nonzero_coefs)
    # Encode the signal.
    Y = np.dot(D, X)
    return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining ones are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Deterministic signal from the first four columns; everything else
    # is noise-only by construction.
    signal = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=signal, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)
    A = rng.rand(n_dim, n_dim)
    # A.T A is PSD; boosting each singular value by a random amount in
    # (1, 2) makes the reconstruction strictly positive definite.
    U, s, V = linalg.svd(np.dot(A.T, A))
    boosted = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, boosted), V)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)

    # Start from -I so the (strictly lower-triangular) random part added
    # below yields a non-singular Cholesky-like factor.
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # NOTE: the masks below are applied sequentially on the mutating array.
    # Entries < alpha are zeroed FIRST, and the '> alpha' mask is then taken
    # on the modified array; entries exactly equal to alpha (probability ~0)
    # keep their raw uniform value.
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle so chol stays triangular.
    aux = np.tril(aux, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol^T chol is symmetric positive definite by construction.
    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)

        # Scale rows then columns by 1/sqrt(diag) so the diagonal becomes 1.
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)

    # Angular parameter t in [1.5*pi, 4.5*pi); drives the spiral.
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    # Depth coordinate, uniform on [0, 21).
    depth = 21 * rng.rand(1, n_samples)

    X = np.concatenate((t * np.cos(t), depth, t * np.sin(t)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)

    # Curve parameter t in [-1.5*pi, 1.5*pi).
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    # Depth coordinate, uniform on [0, 2).
    depth = 2.0 * rng.rand(1, n_samples)

    X = np.concatenate((np.sin(t), depth, np.sign(t) * (np.cos(t) - 1)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.

    n_features : int, optional (default=2)
        The number of features for each sample.

    n_classes : int, optional (default=3)
        The number of classes

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    rng = check_random_state(random_state)
    center = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw the isotropic Gaussian cloud around ``center``.
    X = rng.multivariate_normal(center, cov * np.identity(n_features),
                                (n_samples,))

    # Order samples by squared distance from the center so that label
    # assignment below corresponds to nested spherical shells.
    order = np.argsort(np.sum((X - center[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # Each class gets ``step`` samples; the remainder joins the outermost
    # class so every sample is labeled.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)

    return X, y
def _shuffle(data, random_state=None):
    """Shuffle rows and columns of ``data`` independently.

    Returns the shuffled array together with the row and column
    permutations that were applied.
    """
    rng = check_random_state(random_state)
    rows, cols = data.shape
    row_perm = rng.permutation(rows)
    col_perm = rng.permutation(cols)
    return data[row_perm][:, col_perm], row_perm, col_perm
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer
        The number of biclusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), col_sizes)))

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # NOTE(review): _shuffle is seeded from the raw random_state, not the
        # already-advanced ``generator`` — preserved as-is for reproducibility.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUG FIX: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated (an error in recent NumPy). Use lists.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))

    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # NOTE(review): _shuffle is seeded from the raw random_state, not the
        # already-advanced ``generator`` — preserved as-is for reproducibility.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUG FIX: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated (an error in recent NumPy). Use lists.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| bsd-3-clause |
awanke/bokeh | examples/glyphs/trail.py | 33 | 4656 | # -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.widgets import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
    """Haversine of an angle in radians: hav(theta) = sin^2(theta / 2)."""
    half = sin(theta / 2.0)
    return half * half
def distance(p1, p2):
    """Great-circle distance in km between (lat1, lon1) and (lat2, lon2)."""
    earth_radius_km = 6371
    lat1, lon1 = p1
    lat2, lon2 = p2
    phi1, phi2 = radians(lat1), radians(lat2)
    dphi = radians(lat2 - lat1)
    dlambda = radians(lon2 - lon1)
    # Haversine formula with hav(x) = sin(x/2)**2 inlined.
    a = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlambda / 2) ** 2
    return 2 * earth_radius_km * atan2(sqrt(a), sqrt(1 - a))
def prep_data(dataset):
    """Return a copy of *dataset* with cumulative distance and slope colors.

    Adds a ``dist`` column (cumulative km along the track) and a ``colors``
    column classifying the absolute grade of each segment.
    """
    df = dataset.copy()

    # Cumulative distance along consecutive (lat, lon) fixes.
    latlon = list(zip(df.lat, df.lon))
    seg = np.array([distance(latlon[i + 1], latlon[i])
                    for i in range(len(latlon) - 1)])
    df["dist"] = np.concatenate(([0], np.cumsum(seg)))

    # Absolute grade (%) per segment; classified in ascending order.
    # NOTE: masks are applied sequentially on the mutating array — safe
    # because every assigned code is below the next threshold checked.
    grade = np.abs(100 * np.diff(df.alt) / (1000 * seg))
    grade[np.where(grade < 4)] = 0                     # "green"
    grade[np.where((grade >= 4) & (grade < 6))] = 1    # "yellow"
    grade[np.where((grade >= 6) & (grade < 10))] = 2   # "pink"
    grade[np.where((grade >= 10) & (grade < 15))] = 3  # "orange"
    grade[np.where(grade >= 15)] = 4                   # "red"
    grade = im.median_filter(grade, 6)

    colors = np.empty_like(grade, dtype=object)
    for code, name in enumerate(("green", "yellow", "pink", "orange", "red")):
        colors[np.where(grade == code)] = name
    # One trailing None so the column matches the frame length.
    df["colors"] = list(colors) + [None]
    return df
# Plot title shared by the trail map and the altitude profile builders.
title = "Obiszów MTB XCM"
def trail_map(data):
    """Build a Google-Maps plot of the trail, centered on its bounding box."""
    center_lon = (min(data.lon) + max(data.lon)) / 2
    center_lat = (min(data.lat) + max(data.lat)) / 2
    opts = GMapOptions(lng=center_lon, lat=center_lat, zoom=13)
    plot = GMapPlot(title="%s - Trail Map" % title, map_options=opts,
                    plot_width=800, plot_height=800)

    xaxis = LinearAxis()
    plot.add_layout(xaxis, 'below')
    yaxis = LinearAxis()
    plot.add_layout(yaxis, 'left')

    # Dashed gray grids keyed to each axis ticker.
    plot.renderers.extend([
        Grid(plot=plot, dimension=0, ticker=xaxis.ticker,
             grid_line_dash="dashed", grid_line_color="gray"),
        Grid(plot=plot, dimension=1, ticker=yaxis.ticker,
             grid_line_dash="dashed", grid_line_color="gray"),
    ])
    plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())

    source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
    plot.add_glyph(source, Line(x="x", y="y", line_color="blue", line_width=2))

    plot.x_range = DataRange1d()
    plot.y_range = DataRange1d()
    return plot
def altitude_profile(data):
    """Build the altitude-vs-distance profile, shaded by slope color."""
    plot = Plot(title="%s - Altitude Profile" % title,
                plot_width=800, plot_height=400)

    xaxis = LinearAxis(axis_label="Distance (km)")
    plot.add_layout(xaxis, 'below')
    yaxis = LinearAxis(axis_label="Altitude (m)")
    plot.add_layout(yaxis, 'left')
    plot.renderers.extend([
        Grid(plot=plot, dimension=0, ticker=xaxis.ticker),
        Grid(plot=plot, dimension=1, ticker=yaxis.ticker),
    ])
    plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())

    dist, alt = data.dist, data.alt
    floor = min(alt)

    # One colored quadrilateral per segment, dropped to the lowest altitude.
    patch_source = ColumnDataSource(dict(
        xs=[[dist[i], dist[i + 1], dist[i + 1], dist[i]]
            for i in range(len(dist) - 1)],
        ys=[[floor, floor, alt[i + 1], alt[i]]
            for i in range(len(alt) - 1)],
        color=data.colors[:-1],
    ))
    plot.add_glyph(patch_source,
                   Patches(xs="xs", ys="ys", fill_color="color",
                           line_color="color"))

    # Black outline of the altitude curve drawn on top of the patches.
    line_source = ColumnDataSource(dict(x=data.dist, y=data.alt))
    plot.add_glyph(line_source,
                   Line(x='x', y='y', line_color="black", line_width=1))

    plot.x_range = DataRange1d()
    plot.y_range = DataRange1d()
    return plot
# Assemble the document: altitude profile stacked above the trail map.
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)

layout = VBox(children=[altitude, trail])

doc = Document()
doc.add(layout)

if __name__ == "__main__":
    filename = "trail.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
    print("Wrote %s" % filename)
view(filename) | bsd-3-clause |
hugobowne/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50       # Number of iterations for computing expectations
n_train = 50        # Size of the training set
n_test = 1000       # Size of the test set
noise = 0.1         # Standard deviation of the noise

np.random.seed(0)   # fixed seed so every run reproduces the same figures

# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
              ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]

n_estimators = len(estimators)
# Generate data
def f(x):
    """Target function: two Gaussian bumps centered at x=0 and x=2."""
    flat = np.ravel(x)
    return np.exp(-flat ** 2) + 1.5 * np.exp(-(flat - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
    """Draw a sorted sample X uniform on [-5, 5) with noisy target(s) y.

    With ``n_repeat > 1``, y holds one noisy realization per column for the
    same X.
    """
    X = np.sort(np.random.rand(n_samples) * 10 - 5)

    if n_repeat == 1:
        y = f(X) + np.random.normal(0.0, noise, n_samples)
    else:
        y = np.zeros((n_samples, n_repeat))
        for rep in range(n_repeat):
            y[:, rep] = f(X) + np.random.normal(0.0, noise, n_samples)

    return X.reshape((n_samples, 1)), y
# Draw n_repeat independent training sets so expectations over LS can be
# approximated by averaging.
X_train = []
y_train = []

for i in range(n_repeat):
    X, y = generate(n_samples=n_train, noise=noise)
    X_train.append(X)
    y_train.append(y)

# One fixed test grid, with n_repeat noisy realizations of y per test point.
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)

# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
    # Compute predictions
    y_predict = np.zeros((n_test, n_repeat))

    for i in range(n_repeat):
        estimator.fit(X_train[i], y_train[i])
        y_predict[:, i] = estimator.predict(X_test)

    # Bias^2 + Variance + Noise decomposition of the mean squared error
    y_error = np.zeros(n_test)

    for i in range(n_repeat):
        for j in range(n_repeat):
            y_error += (y_test[:, j] - y_predict[:, i]) ** 2

    y_error /= (n_repeat * n_repeat)

    # Irreducible noise: variance of y across its noisy realizations.
    y_noise = np.var(y_test, axis=1)
    # Squared bias: gap between the average prediction and the true f.
    y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
    # Variance of the predictions across training sets.
    y_var = np.var(y_predict, axis=1)

    print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
          " + {3:.4f} (var) + {4:.4f} (noise)".format(name,
                                                      np.mean(y_error),
                                                      np.mean(y_bias),
                                                      np.mean(y_var),
                                                      np.mean(y_noise)))

    # Plot figures
    plt.subplot(2, n_estimators, n + 1)
    plt.plot(X_test, f(X_test), "b", label="$f(x)$")
    plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")

    # The beam of individual predictions (light red) plus one labeled curve.
    for i in range(n_repeat):
        if i == 0:
            plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
        else:
            plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)

    plt.plot(X_test, np.mean(y_predict, axis=1), "c",
             label="$\mathbb{E}_{LS} \^y(x)$")

    plt.xlim([-5, 5])
    plt.title(name)

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

    # Pointwise decomposition plot beneath the prediction plot.
    plt.subplot(2, n_estimators, n_estimators + n + 1)
    plt.plot(X_test, y_error, "r", label="$error(x)$")
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
    plt.plot(X_test, y_var, "g", label="$variance(x)$"),
    plt.plot(X_test, y_noise, "c", label="$noise(x)$")

    plt.xlim([-5, 5])
    plt.ylim([0, 0.1])

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

plt.show()
| bsd-3-clause |
larsmans/scikit-learn | examples/svm/plot_svm_nonlinear.py | 61 | 1089 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learn by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Dense evaluation grid over [-3, 3]^2 and a random XOR-labeled sample.
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUG FIX: ``linetypes`` is not a valid contour() keyword; ``linestyles``
# is the documented argument for the decision-boundary line style.
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='dashed')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred); averaging mode pinned via partial.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense indicator matrix to that format.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Scalars are promoted to 1-element arrays so the product loop below
    # works uniformly for scalar and vector parameter sweeps.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Distinct fixed seeds give independent y_true / y_pred draws.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)

                # ``i`` enumerates the flattened (samples, classes, density)
                # grid in itertools.product order, which matches the row-major
                # layout of the trailing three axes — so .flat[i] addresses
                # the correct cell of out[k, j].
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for m_idx, metric in enumerate(metrics):
        # Color encodes the metric, marker encodes the format.
        color = metric_colors[m_idx % len(metric_colors)]
        for f_idx, fmt in enumerate(formats):
            ax.plot(x_ticks, results[m_idx, f_idx].flat,
                    label='{}, {}'.format(metric, fmt),
                    marker=format_markers[f_idx],
                    color=color)
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                    'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                    '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                    'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the scalar value of the swept parameter with a vector of
        # n_steps values up to it (integer-rounded for classes/samples).
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)

    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
jgphpc/linux | slurm/crayvis/crayvis_pmessmer.py | 1 | 4263 | #!/usr/bin/env python3
# Thanks goes to Peter Messmer at NVIDIA
# mll daint-gpu PyExtensions/3.6.1.1-CrayGNU-17.08
# https://github.com/eth-cscs/pyfr/issues/11
# Gray are the compute nodes
# Yellow the service nodes
# Blue the nodes allocated in the run
# Red the failed node
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import re
import sys
# 2018/06: row0=mc rows1-3=gpu, row3=8cabs only
# hostlist -e `sacct -j 1323038 -o nodelist -P |grep -m1 ^nid` > nids.daint
# first=c0-0c0s0n1 last=s7-3c2s15n3 / c(0-9)-(0-3)c(0-2)s(0-15)n(0-3)
# cab row chassis blade cn
# <= 10 *4 *3 *16 *4 = 7680 (delta=551)
# grep service xtprocadmin.daint |wl # = 117
# grep compute xtprocadmin.daint |wl # = 7129
# grep -v NODENAME xtprocadmin.daint |wl # = 7246
## 1 electrical group = 384cn
# ## --- cabinets:
# 741 c0
# 753 c1
# 737 c2
# 753 c3
# 745 c4
# 757 c5
# 759 c6
# 768 c7
# 564 c8
# 552 c9
# average = 712.9 cn / cabinet
#
# ## --- rows:
# 1809 0
# 1880 1
# 1904 2
# 1536 3
# average = 1782.25 cn / row
#old daint-: 5320n=52services+5268cn
#old 566 c0
#old 570 c1
#old 564 c2
#old 576 c3
#old 564 c4
#old 576 c5
#old 566 c6
#old 576 c7
#old 378 c8
#old 384 c9
#old c6-0c0s1n3: cab0-9 row0-2 chassis0-2 slotblade0-15 cn0-3
#old <= 10*3*3*16*4 = 5760cn
#old santis: cab0-0 row0-0 chassis0-2 slotblade0-15 cn0-3
#old <= 1*1*3*16*4 = 192cn
# Pixel geometry of the rendered machine map.  Each compute node is a
# cpuDx x cpuDy rectangle; blades (slots), chassis (cages) and cabinets
# are built up from that with a few pixels of padding between levels.
cpuDx = 4
cpuDy = 4
slotDx = 4 * cpuDx + 2      # one blade holds 4 nodes side by side
slotDy = 1 * cpuDy + 2
cageDx = 1 * slotDx + 4     # one chassis is a column of 16 blades
cageDy = 16 * slotDy + 2
cabinetDx = 4 * cageDx + 6  # one cabinet holds 3-4 chassis (4 reserved here)
cabinetDy = 1 * cageDy + 6
# Default input files; both are overridden from sys.argv in the main
# script below, so these values only matter for interactive use.
# fileTopo = 'DaintTopo.txt'
#fileTopo = 'santistopo.txt'
fileTopo = 'xtprocadmin.daint'
fileNodes= 'nodes.txt'
#fileImage= 'image.png'
def parseTopo(filename, nodes):
    """Parse an ``xtprocadmin`` dump into the *nodes* dict (mutated in place).

    Parameters
    ----------
    filename : str
        Path to the topology dump.  The first line is a header
        (NODENAME ...) and is skipped; each following line looks like
        ``NID (HEX) NODENAME TYPE STATUS ...``.
    nodes : dict
        Filled with ``nid -> [nid_str, nodename, type, status]``.
    """
    # Use a context manager so the file handle is closed even if a
    # malformed line raises (the original leaked the handle on error).
    with open(filename, 'r') as f:
        f.readline()  # skip the header line
        for line in f:
            words = line.split()
            nid = int(words[0])
            # words[1] is the hex nid; keep the decimal string, name,
            # node type (compute/service) and status (up/down).
            nodes[nid] = [words[0], words[2], words[3], words[4]]
def posToCoord(p):
    """Split a node name like ``c0-0c0s2n2`` into its numeric fields.

    Returns the list of digit groups as strings:
    [cabinet, row, chassis, slot, node].
    """
    digit_groups = re.compile(r"\d+")
    return digit_groups.findall(p)
def initNodeArray():
    """Allocate the blank RGB canvas: 4 cabinet rows by 10 cabinet
    columns, three colour channels, all zeros (black)."""
    canvas_shape = (4 * cabinetDx, 10 * cabinetDy, 3)
    #nodeArray = np.zeros([3 * cabinetDx, 10 * cabinetDy, 3])
    return np.zeros(canvas_shape)
def paintNode(nA, node, color):
    """Fill the pixel rectangle of one node (name like 'c0-0c0s2n2')
    with *color* on the canvas *nA*."""
    cab, row, chassis, slot, cn = posToCoord(node)
    # x axis: row of cabinets -> chassis -> node within the blade;
    # y axis: cabinet within the row -> slot (blade) within the chassis.
    px = int(row) * cabinetDx + int(chassis) * cageDx + int(cn) * cpuDx
    py = int(cab) * cabinetDy + int(slot) * slotDy
    # Leave a one-pixel gap on each side so neighbouring nodes stay visible.
    nA[px:px + cpuDx - 1, py:py + cpuDy - 1, :] = color
def paintTopo(nA, node):
    """Paint one node record according to its role: grey for compute,
    yellow for service, red for anything else (e.g. failed/unknown)."""
    palette = {
        "compute": [0.5, 0.5, 0.5],
        "service": [1, 1, 0],
    }
    color = palette.get(node[2], [1, 0, 0])
    paintNode(nA, node[1], color)
def paintNodeList(nA, nodes):
    """Highlight every node listed in the global ``fileNodes`` file.

    The file contains one decimal nid per line (the job's allocation).
    Each listed node is painted cyan-ish on the canvas *nA*, and its
    record is printed for inspection.

    Parameters
    ----------
    nA : ndarray
        The RGB canvas produced by ``initNodeArray``.
    nodes : dict
        Mapping ``nid -> [nid_str, nodename, type, status]``.
    """
    color = [0, 1, 1]
    # Use a context manager: the original never closed the file handle.
    with open(fileNodes, 'r') as f:
        for line in f:
            n = int(line.strip())
            print(nodes[n])
            paintNode(nA, nodes[n][1], color)
def paintFaultNodes(nA, nodes, faultNodes):
    """Mark the failed nodes in red on the canvas *nA*.

    *faultNodes* is a sequence of nid strings (trailing command-line
    arguments); each is looked up in *nodes* for its position name.
    """
    red = [1, 0, 0]
    for nid in faultNodes:
        position = nodes[int(nid)][1]
        paintNode(nA, position, red)
#---
# Usage:
#   python crayvis_pmessmer.py <topofile> <nodesfile> <imagefile> [faultNid ...]
# e.g. python crayvis_pmessmer.py xtprocadmin.daint nodes.txt eff.png
# Example data flowing through the script:
#   nodes.txt          (allocation list, one nid per line)
#   eff.png            (output image)
#   []                 (faultNodes when no extra args given)
#   ['10', 'c0-0c0s2n2', 'compute', 'up']   (one nodes[] record)
#   ['0', '0', '0', '2', '2']               (posToCoord of its name)
# Copy argv so we can pop arguments off destructively.
args = []
for i in sys.argv:
    args.append(i)
args.pop(0)  # drop the script name itself
fileTopo = args.pop(0)
fileNodes = args.pop(0)
fileImage = args.pop(0)
print('in1=', fileTopo)
print('in2=', fileNodes)
print('out=', fileImage)
# Whatever remains on the command line is the list of failed nids.
faultNodes = args
print('fault=',faultNodes)
# Build the nid -> [nid, name, type, status] map from the topology dump.
nodes = dict()
#parseTopo('DaintTopo.txt', nodes)
parseTopo(fileTopo, nodes)
#print(nodes)
#ok print('cn[10]=', nodes[10])
#ok print('cn[10]=', posToCoord(nodes[10][1]))
# Paint every node by role, then overlay the allocation and the faults.
nodeArray = initNodeArray()
for n in nodes:
    #print(nodes[n])
    paintTopo(nodeArray, nodes[n])
paintNodeList(nodeArray, nodes)
paintFaultNodes(nodeArray, nodes, faultNodes)
# Earlier matplotlib-based rendering attempts, kept for reference:
# bof:
#plt.figure()
#plt.imshow(nodeArray)
#plt.text(5, 95, "Don't use Jet!", color="white", fontsize=10)
##plt.show()
#plt.savefig('eff.png')
#quit()
#ok:
#img.imsave("out.png", nodeArray)
# Write the canvas straight to disk without any figure decoration.
img.imsave(fileImage, nodeArray)
| gpl-2.0 |
ronojoy/BDA_py_demos | demos_ch6/demo6_2.py | 19 | 1366 | """Bayesian Data Analysis, 3rd ed
Chapter 6, demo 2
Posterior predictive checking
Binomial example - Testing sequential dependence example
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt

# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
# NOTE(review): 'axes.color_cycle' was deprecated in later matplotlib in
# favour of 'axes.prop_cycle' -- confirm the targeted matplotlib version.
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
                            '#984ea3','#ff7f00','#ffff33'))

# Testing sequential dependence example (Gelman et al p. 163)
y = np.array([1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
# Test statistic T(y): the number of switches between 0 and 1.
Ty = np.count_nonzero(np.diff(y))

# sufficient statistics
n = len(y)
s = y.sum()

# Draw theta from the Beta(s+1, n-s+1) posterior, then replicate
# datasets yr (one column per posterior draw) for the predictive check.
nsamp = 10000
t = np.random.beta(s+1, n-s+1, size=nsamp)
yr = np.random.rand(n, nsamp) < t
# sadly np.count_nonzero does not (yet) support axis parameter
Tyr = (np.diff(yr, axis=0) != 0).sum(axis=0)

# ====== plot: histogram of T(y_rep) with the observed T(y) marked
plt.hist(Tyr, np.arange(19), align='left', label='$T(y_\mathrm{rep})$')
plt.axvline(Ty, color='#e41a1c', label='$T(y)$')
plt.yticks(())
plt.xlim((-0.5,17.5))
plt.title('Binomial example - number of changes? \n'
          r'$\operatorname{Pr}(T(y_\mathrm{rep},\theta) \leq T(y,\theta)|y) = 0.03$')
plt.legend()

# make room for the title
axis = plt.gca()
box = axis.get_position()
axis.set_position([box.x0, box.y0, box.width, box.height * 0.9])
plt.show()
| gpl-3.0 |
JingheZ/shogun | applications/tapkee/faces_embedding.py | 26 | 2078 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2011 Sergey Lisitsyn
# Copyright (C) 2011 Sergey Lisitsyn
from modshogun import *
from numpy import *
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import re,os,time
from pylab import *
def build_features(path):
    """Load every face image under *path* into a column-per-image matrix.

    The directory's 'README' entry is skipped; all images are assumed to
    share the shape of the first one.  Returns ``(nd, md, features)``
    where ``nd`` x ``md`` is the image shape and ``features`` is a
    shogun RealFeatures of shape (nd*md, n_images).
    """
    filenames = os.listdir(path)
    filenames.remove('README')
    n_images = len(filenames)
    (nd, md) = imread(os.path.join(path, filenames[0])).shape
    n_pixels = nd * md
    matrix = zeros([n_pixels, n_images])
    for col, fname in enumerate(filenames):
        # Flatten each image into one column of the feature matrix.
        matrix[:, col] = imread(os.path.join(path, fname)).ravel()
    return nd, md, RealFeatures(matrix)
# NOTE: this script is Python 2 (print statements below).
path = '../../data/faces/'
converter = DiffusionMaps
# Load images and embed them into 2-D with diffusion maps.
nd,md,features = build_features(path)
converter_instance = converter()
converter_instance.set_t(5)
converter_instance.set_target_dim(2)
start = time.time()
new_features = converter_instance.embed(features).get_feature_matrix()
print new_features.shape
end = time.time()
# Cluster the original (un-embedded) features into two groups.
clusterer = KMeans
clusterer_instance = clusterer(2,EuclideanDistance())
clusterer_instance.train(features)
labels = clusterer_instance.apply().get_labels()
print labels
print 'applied %s, took %fs' % (converter_instance.get_name(), end-start)
print 'plotting'
# Scatter the embedded points, then overlay each face thumbnail at its
# embedded location.
fig = figure()
ax = fig.add_subplot(111,axisbg='#ffffff')
ax.scatter(new_features[0],new_features[1],color='black')
import random
for i in range(len(new_features[0])):
    feature_vector = features.get_feature_vector(i)
    # Build an RGBA thumbnail: inverted greyscale in RGB, and a smooth
    # sine-window alpha channel so thumbnail edges fade out.
    Z = zeros([nd,md,4])
    Z[:,:,0] = 255-feature_vector.reshape(nd,md)[::-1,:]
    Z[:,:,1] = Z[:,:,0]
    Z[:,:,2] = Z[:,:,0]
    for k in range(nd):
        for j in range(md):
            Z[k,j,3] = pow(sin(k*pi/nd)*sin(j*pi/md),0.5)
    imagebox = OffsetImage(Z,cmap=cm.gray,zoom=0.25)
    ab = AnnotationBbox(imagebox, (new_features[0,i],new_features[1,i]),
                        pad=0.001,frameon=False)
    ax.add_artist(ab)
axis('off')
savefig('faces.png')
show()
show()
| gpl-3.0 |
AIML/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data: the scikit-learn digits dataset (8x8 images, 10 classes)
digits = load_digits()
X, y = digits.data, digits.target

# build a classifier (estimator count fixed; it is a time/quality knob,
# not part of the searched hyperparameter space)
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
    """Print the `n_top` best-scoring parameter settings from a search.

    Each entry of `grid_scores` must expose `mean_validation_score`,
    `cv_validation_scores` and `parameters` (and be indexable so that
    item 1 is the mean score, as sklearn's grid-score tuples are).
    """
    best = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
    for rank, score in enumerate(best, start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
              score.mean_validation_score,
              np.std(score.cv_validation_scores)))
        print("Parameters: {0}".format(score.parameters))
        print("")
# specify parameters and distributions to sample from
# (sp_randint draws integers uniformly from [low, high))
param_dist = {"max_depth": [3, None],
              "max_features": sp_randint(1, 11),
              "min_samples_split": sp_randint(1, 11),
              "min_samples_leaf": sp_randint(1, 11),
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run randomized search: only n_iter_search parameter settings are tried
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
                                   n_iter=n_iter_search)

start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
      " parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)

# use a full grid over all parameters (discrete values replace the
# distributions, but the covered space is the same)
param_grid = {"max_depth": [3, None],
              "max_features": [1, 3, 10],
              "min_samples_split": [1, 3, 10],
              "min_samples_leaf": [1, 3, 10],
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run grid search: exhaustively tries every combination in param_grid
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)

print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
      % (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/function_base.py | 30 | 124613 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
# On Python 2 this rebinds the module-level name `range` to `xrange`.
if sys.version_info[0] < 3:
    range = xrange

# Public API of this module.
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
    'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
    'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
    ]
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.  Any ordinary failure of iter() (typically
        # TypeError) still means "not iterable".
        return 0
    return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the density keyword
        instead.
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that this latter behavior is
        known to be buggy with unequal bin widths; use `density` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1). If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the `normed` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words, if
    `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
    4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()

    if (range is not None):
        mn, mx = range
        if (mn > mx):
            raise AttributeError(
                'max must be larger than min in range parameter.')

    if not iterable(bins):
        # `bins` is a bin count: build uniform edges from `range` (or the
        # data extent when `range` is not given).
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        if range is None:
            if a.size == 0:
                # handle empty arrays. Can't determine range, so use 0-1.
                range = (0, 1)
            else:
                range = (a.min(), a.max())
        mn, mx = [mi + 0.0 for mi in range]
        if mn == mx:
            # Degenerate range: widen it so linspace produces distinct edges.
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins + 1, endpoint=True)
    else:
        # `bins` is an explicit edge sequence; it must be non-decreasing.
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                'bins must increase monotonically.')

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)

    # Process the data in fixed-size chunks to bound the memory used by
    # the per-chunk sort below.
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            # Counting via cumulative searchsorted positions: entry k holds
            # the number of samples < bins[k] (rightmost edge inclusive).
            n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                       sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # Cumulative weights let the searchsorted positions index
            # directly into "total weight below this edge".
            cw = np.concatenate(([zero, ], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                              sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]

    # Convert cumulative counts into per-bin counts.
    n = np.diff(n)

    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : array_like (N,), optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # avoid rounding issues for comparisons when dealing with inexact types
    if np.issubdtype(sample.dtype, np.inexact):
        edge_dt = sample.dtype
    else:
        edge_dt = float
    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
        else:
            edges[i] = asarray(bins[i], edge_dt)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")

    nbin = asarray(nbin)

    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) ==
                       around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    # Dimensions are processed in increasing-size order (argsort) so the
    # swapaxes pass below can restore the natural axis order.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges
    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*[slice(1, -1)]
    hist = hist[core]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : int, optional
        Axis along which to average `a`. If `None`, averaging is done over
        the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.

    Returns
    -------
    average, [sum_of_weights] : {array_type, double}
        Return the average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `Float`
        if `a` is of integer type, otherwise it is of the same type as `a`.
        `sum_of_weights` is of the same type as `average`.

    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.

    See Also
    --------
    mean

    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values

    Examples
    --------
    >>> data = range(1,5)
    >>> data
    [1, 2, 3, 4]
    >>> np.average(data)
    2.5
    >>> np.average(range(1,11), weights=range(10,0,-1))
    4.0

    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([ 0.75,  2.75,  4.75])
    >>> np.average(data, weights=[1./4, 3./4])
    Traceback (most recent call last):
    ...
    TypeError: Axis must be specified when shapes of a and weights differ.

    """
    # Matrices are passed through unchanged so the result stays a matrix.
    if not isinstance(a, np.matrix):
        a = np.asarray(a)

    if weights is None:
        avg = a.mean(axis)
        # Unweighted case: the "sum of weights" is simply the number of
        # elements averaged over.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        # Force at least float arithmetic before weighting.
        a = a + 0.0
        wgt = np.array(weights, dtype=a.dtype, copy=0)

        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")

            # setup wgt to broadcast along axis
            wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)

        scl = wgt.sum(axis=axis)
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg = np.multiply(a, wgt).sum(axis)/scl

    if returned:
        # Broadcast scl to the shape of avg before returning the pair.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a, dtype=None, order=None):
    """
    Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array
        (lists, tuples, nested sequences, ndarrays).  Success requires
        that the data contain no NaNs or Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation. Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`. No copy is performed if the input
        is already an ndarray.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfarray, asfortranarray

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2], dtype=float)
    array([1., 2.])

    """
    arr = asarray(a, dtype=dtype, order=order)
    # The finiteness check only makes sense for floating-point kinds;
    # integer/object arrays pass through untouched.
    is_float_kind = arr.dtype.char in typecodes['AllFloat']
    if is_float_kind and not np.isfinite(arr).all():
        raise ValueError(
            "array must not contain infs or NaNs")
    return arr
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        Each boolean array corresponds to a function in `funclist`. Wherever
        `condlist[i]` is True, `funclist[i](x)` is used as the output value.

        Each boolean array in `condlist` selects a piece of `x`,
        and should therefore be of the same shape as `x`.

        The length of `condlist` must correspond to that of `funclist`.
        If one extra function is given, i.e. if
        ``len(funclist) - len(condlist) == 1``, then that extra function
        is the default value, used wherever all conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True. It should take an array as input and give an array
        or a scalar value as output. If, instead of a callable,
        a scalar is provided then a constant function (``lambda x: scalar``) is
        assumed.
    args : tuple, optional
        Any further arguments given to `piecewise` are passed to the functions
        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
        each function is called as ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments used in calling `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., lambda=1)``, then each function is called as
        ``f(x, lambda=1)``.

    Returns
    -------
    out : ndarray
        The output is the same shape and type as x and is found by
        calling the functions in `funclist` on the appropriate portions of `x`,
        as defined by the boolean arrays in `condlist`. Portions not covered
        by any condition have a default value of 0.

    See Also
    --------
    choose, select, where

    Notes
    -----
    This is similar to choose or select, except that functions are
    evaluated on elements of `x` that satisfy the corresponding condition from
    `condlist`.

    The result is::

            |--
            |funclist[0](x[condlist[0]])
      out = |funclist[1](x[condlist[1]])
            |...
            |funclist[n2](x[condlist[n2]])
            |--

    Examples
    --------
    Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.

    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])

    Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
    ``x >= 0``.

    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
    array([ 2.5,  1.5,  0.5,  0.5,  1.5,  2.5])

    """
    x = asanyarray(x)
    n2 = len(funclist)
    # A single bare condition (scalar or one boolean array) is wrapped so
    # the rest of the code can treat condlist uniformly as a list.
    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
                                   isinstance(condlist[0], ndarray))):
        condlist = [condlist]
    condlist = array(condlist, dtype=bool)
    n = len(condlist)
    # This is a hack to work around problems with NumPy's
    #  handling of 0-d arrays and boolean indexing with
    #  numpy.bool_ scalars
    zerod = False
    if x.ndim == 0:
        x = x[None]
        zerod = True
        if condlist.shape[-1] != 1:
            condlist = condlist.T
    if n == n2 - 1:  # compute the "otherwise" condition.
        totlist = np.logical_or.reduce(condlist, axis=0)
        condlist = np.vstack([condlist, ~totlist])
        n += 1
    if (n != n2):
        raise ValueError(
            "function list and condition list must be the same")

    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not isinstance(item, collections.Callable):
            # Scalar entry: behaves as a constant function.
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        # Undo the 0-d -> 1-d promotion applied above.
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in `choicelist`
        the output elements are taken. When multiple conditions are satisfied,
        the first one encountered in `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It has
        to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])

    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        warnings.warn("select with an empty condition list is not possible"
                      "and will be deprecated",
                      DeprecationWarning)
        return np.asarray(default)[()]

    choicelist = [np.asarray(choice) for choice in choicelist]
    # `default` rides along as the last choice; it is peeled off again
    # before the copyto loop below.
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it seperatly optimizes
    # for example when all choices are scalars.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    deprecated_ints = False
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            if np.issubdtype(cond.dtype, np.integer):
                # A previous implementation accepted int ndarrays accidentally.
                # Supported here deliberately, but deprecated.
                condlist[i] = condlist[i].astype(bool)
                deprecated_ints = True
            else:
                raise ValueError(
                    'invalid entry in choicelist: should be boolean ndarray')

    if deprecated_ints:
        msg = "select condlists containing integer ndarrays is deprecated " \
            "and will be removed in the future. Use `.astype(bool)` to " \
            "convert to bools."
        warnings.warn(msg, DeprecationWarning)

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    # Start from the default value everywhere.
    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result
def copy(a, order='K'):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Memory layout of the copy: 'C' (C-order), 'F' (Fortran-order),
        'A' ('F' if `a` is Fortran contiguous, 'C' otherwise), or 'K'
        (match the layout of `a` as closely as possible).  Note that
        this function defaults to 'K' while :meth:`ndarray.copy`
        defaults to 'C'.

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`, guaranteed to be a copy.

    Notes
    -----
    Equivalent to ``np.array(a, copy=True)``.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> y = x
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == y[0]
    True
    >>> x[0] == z[0]
    False
    """
    # Delegate to array(); copy=True forces a new buffer.
    return array(a, copy=True, order=order)
# Basic operations
def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central differences
    in the interior and either first differences or second order accurate
    one-sided (forward or backwards) differences at the boundaries. The
    returned gradient hence has the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
    edge_order : {1, 2}, optional
        Gradient is calculated using N-th order accurate differences
        at the boundaries. Default: 1.

        .. versionadded:: 1.9.1

    Returns
    -------
    gradient : ndarray
        N arrays of the same shape as `f` giving the derivative of `f` with
        respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
    [array([[ 2.,  2., -1.],
            [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
            [ 1. ,  1. ,  1. ]])]
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # a single scalar spacing applies to every axis
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        raise SyntaxError(
            "invalid number of arguments")
    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
            '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")
    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.
    outvals = []
    # create slice objects --- initially all are [:, :, ..., :]
    # FIX: ndarrays are always indexed below with tuple(sliceX); indexing with
    # a *list* of slices is deprecated in NumPy and removed in recent versions.
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N
    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
        otype = 'd'
    # Difference of datetime64 elements results in timedelta64
    if otype == 'M':
        # Need to use the full dtype name because it contains unit information
        otype = f.dtype.name.replace('datetime', 'timedelta')
    elif otype == 'm':
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype
    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view('int64')
    else:
        y = f
    for axis in range(N):
        if y.shape[axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least two elements are required.")
        # Numerical differentiation: 1st order edges, 2nd order interior
        if y.shape[axis] == 2 or edge_order == 1:
            # Use first order differences for time data
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            # 1D equivalent -- out[0] = (y[1] - y[0])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])
        # Numerical differentiation: 2nd order edges, 2nd order interior
        else:
            # Use second order differences where possible
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
            out[tuple(slice1)] = -(3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
                                   y[tuple(slice4)])/2.0
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
            out[tuple(slice1)] = (3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
                                  y[tuple(slice4)])/2.0
        # divide by step size
        out /= dx[axis]
        outvals.append(out)
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)
    if N == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th order discrete difference along given axis.

    The first order difference is given by ``out[i] = a[i+1] - a[i]`` along
    the given axis; higher orders are obtained by repeating the operation.

    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.

    Returns
    -------
    diff : ndarray
        The `n` order differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`.

    See Also
    --------
    gradient, ediff1d, cumsum

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    ndim = len(a.shape)
    # Build the two shifted views along `axis`: elements 1.. and 0..-1.
    upper = [slice(None)]*ndim
    lower = [slice(None)]*ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    # Apply the first difference n times (iterative form of the recursion).
    for _ in range(n):
        a = a[upper] - a[lower]
    return a
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given values at discrete data-points.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.

    Returns
    -------
    y : {float, ndarray}
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length

    Notes
    -----
    Does not check that the x-coordinate sequence `xp` is increasing; if it
    is not, the results are nonsense.  A simple check is
    ``np.all(np.diff(xp) > 0)``.

    Examples
    --------
    >>> xp = [1, 2, 3]
    >>> fp = [3, 2, 0]
    >>> np.interp(2.5, xp, fp)
    1.0
    >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
    array([ 3.  ,  3.  ,  2.5 ,  0.56,  0.  ])
    """
    # Scalar inputs (Python numbers, numpy scalars, 0-d arrays) go through
    # the compiled routine wrapped in a 1-element list, then back to a scalar.
    is_scalar = isinstance(x, (float, int, number))
    if not is_scalar and isinstance(x, np.ndarray) and x.ndim == 0:
        is_scalar = True
    if is_scalar:
        return compiled_interp([x], xp, fp, left, right).item()
    return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : {ndarray, scalar}
        The counterclockwise angle from the positive real axis on
        the complex plane, with dtype as numpy.float64.

    See Also
    --------
    arctan2
    absolute

    Examples
    --------
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    z = asarray(z)
    if issubclass(z.dtype.type, _nx.complexfloating):
        result = arctan2(z.imag, z.real)
    else:
        # real input: the angle is 0 for positive values, pi for negative
        result = arctan2(0, z)
    if deg:
        result = result * (180/pi)
    return result
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.

    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> phase
    array([ 0.        ,  0.78539816,  1.57079633,  5.49778714,  6.28318531])
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    # Index expression selecting everything but the first element along `axis`.
    # FIX: converted to a tuple -- indexing an ndarray with a *list* of slices
    # is deprecated in NumPy and removed in recent versions.
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    # Wrap the first differences into [-pi, pi).
    ddmod = mod(dd + pi, 2*pi) - pi
    # A jump of exactly +pi must map to +pi (not -pi) so its correction is 0.
    _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` are left untouched.
    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
    array([ 1.+2.j,  2.-1.j,  3.-3.j,  3.-2.j,  3.+5.j])
    """
    # Work on a copy so the caller's data is untouched; ndarray.sort on a
    # complex array orders by (real, imag) lexicographically.
    sorted_copy = array(a, copy=True)
    sorted_copy.sort()
    if issubclass(sorted_copy.dtype.type, _nx.complexfloating):
        return sorted_copy
    # Real input: promote to the smallest complex type that holds the data.
    kind = sorted_copy.dtype.char
    if kind in 'bhBH':
        return sorted_copy.astype('F')
    if kind == 'g':
        return sorted_copy.astype('G')
    return sorted_copy.astype('D')
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim from
        back. Default is 'fb', trim zeros from both front and back of the
        array.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is preserved.

    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    >>> np.trim_zeros(a, 'b')
    array([0, 0, 0, 1, 2, 3, 0, 2, 1])

    The input data type is preserved, list/tuple in means list/tuple out.

    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]
    """
    trim = trim.upper()
    # Advance `start` past leading zeros when trimming the front.
    start = 0
    if 'F' in trim:
        for value in filt:
            if value != 0.:
                break
            start += 1
    # Walk `stop` back past trailing zeros when trimming the back.
    stop = len(filt)
    if 'B' in trim:
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    # Slicing preserves the input container type (list, tuple, ndarray).
    return filt[start:stop]
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    try:
        flat = x.flatten()
    except AttributeError:
        # Not an ndarray: fall back to Python-level dedup and sort.
        return asarray(sorted(set(x)))
    if flat.size == 0:
        return flat
    flat.sort()
    # After sorting, keep the first element and every element that differs
    # from its predecessor.
    keep = concatenate(([True], flat[1:] != flat[:-1]))
    return flat[keep]
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
    `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.

    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements of `arr`
        to extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.

    See Also
    --------
    take, put, copyto, compress

    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> condition = np.mod(arr, 3)==0
    >>> np.extract(condition, arr)
    array([0, 3, 6, 9])
    """
    # Flatten both inputs, then gather the positions where the condition
    # is nonzero.
    flat_condition = ravel(condition)
    flat_arr = ravel(arr)
    indices = nonzero(flat_condition)[0]
    return _nx.take(flat_arr, indices)
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.  If `vals` is smaller than N it is repeated.  `extract` does
    the exact opposite of `place`.

    Parameters
    ----------
    arr : array_like
        Array to put data into, modified in place.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    # Thin wrapper around the compiled insertion routine.
    return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout`` which is
        very similar to ``print``. `device` needs to have ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    target = sys.stdout if device is None else device
    if linefeed:
        target.write('%s\n' % mesg)
    else:
        target.write('%s' % mesg)
    target.flush()
    return
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If `None`, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.
        .. versionadded:: 1.7.0
    cache : bool, optional
        If `True`, then cache the first function call that determines the number
        of outputs if `otypes` is not provided.
        .. versionadded:: 1.7.0
    Returns
    -------
    vectorized : callable
        Vectorized function.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    The `excluded` argument can be used to prevent vectorizing over certain
    arguments.  This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:
    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res
    >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])
    Positional arguments may also be excluded by specifying their position:
    >>> vpolyval.excluded.add(0)
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])
    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.
    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs.  The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice.  However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrades performance.
    """
    def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
                 cache=False):
        self.pyfunc = pyfunc
        self.cache = cache
        self._ufunc = None    # Caching to improve default performance
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        if isinstance(otypes, str):
            # otypes given as a typecode string, e.g. 'dd' for two doubles;
            # every character must be a valid numpy typecode.
            self.otypes = otypes
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            # otypes given as a list of dtype-like specifiers; normalize to
            # the equivalent typecode string.
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)
    def __call__(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: no keyword or excluded arguments, vectorize
            # `pyfunc` directly over the positional arguments.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)
            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)
            def func(*vargs):
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)
            # Only the non-excluded arguments are broadcast over; the rest
            # are passed through unchanged via the closure above.
            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])
        return self._vectorize_call(func=func, args=vargs)
    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')
        if self.otypes:
            otypes = self.otypes
            nout = len(otypes)
            # Note logic here: We only *use* self._ufunc if func is self.pyfunc
            # even though we set self._ufunc regardless.
            if func is self.pyfunc and self._ufunc is not None:
                ufunc = self._ufunc
            else:
                ufunc = self._ufunc = frompyfunc(func, len(args), nout)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args.  We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            inputs = [asarray(_a).flat[0] for _a in args]
            outputs = func(*inputs)
            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]
                def _func(*vargs):
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func
            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)
            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])
            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)
        return ufunc, otypes
    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            # No inputs to broadcast over: just evaluate the function once.
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]
            outputs = ufunc(*inputs)
            # The ufunc yields object arrays; cast each output to the
            # determined otype.
            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as that of `m`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : int, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number of
        observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof``.
    ddof : int, optional
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

        .. versionadded:: 1.5

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")
    # Handles complex arrays too
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    # A single row can only be interpreted as one variable.
    if X.shape[0] == 1:
        rowvar = 1
    # `axis` is the variable axis; observations run along the other one.
    axis = 0 if rowvar else 1
    N = X.shape[1 - axis]
    # Resolve the normalization divisor (N - ddof).
    if ddof is None:
        ddof = 1 if bias == 0 else 0
    fact = float(N - ddof)
    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0
    if y is not None:
        # Stack the extra variables along the variable axis.
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)
    # Remove the mean over the observation axis (in place).
    X -= X.mean(axis=1-axis, keepdims=True)
    if rowvar:
        return (dot(X, X.T.conj()) / fact).squeeze()
    return (dot(X.T, X.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
    """
    Return correlation coefficients.

    Please refer to the documentation for `cov` for more detail.  The
    relationship between the correlation coefficient matrix, `P`, and the
    covariance matrix, `C`, is

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `P` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations, same shape as `x`.
    rowvar : int, optional
        If non-zero (default), each row is a variable with observations in
        the columns; otherwise the relationship is transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)``; if 1, normalize by ``N``.
        Overridden by ``ddof``.
    ddof : {None, int}, optional
        If not ``None`` normalization is by ``(N - ddof)``, overriding the
        value implied by ``bias``. Default ``None``.

        .. versionadded:: 1.5

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix
    """
    cmat = cov(x, y, rowvar, bias, ddof)
    try:
        variances = diag(cmat)
    except ValueError:  # scalar covariance
        # nan if incorrect value (nan, inf, 0), 1 otherwise
        return cmat / cmat
    # Normalize each entry by the geometric mean of the two variances.
    return cmat / sqrt(multiply.outer(variances, variances))
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed by using the first three terms of
    a summation of cosines. It was designed to have close to the minimal
    leakage possible -- close to optimal, only slightly worse than a Kaiser
    window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value one
        appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.

    Examples
    --------
    >>> np.blackman(12)
    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    # Three-term cosine sum evaluated over the whole window at once.
    return 0.42 - 0.5*cos(2.0*pi*k/(M-1)) + 0.08*cos(4.0*pi*k/(M-1))
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except that
    the end points are at zero. It is often used in signal processing for
    tapering a signal, without generating too much ripple in the frequency
    domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Note that convolution with this window produces linear interpolation.
    The Fourier transform of the Bartlett window is the product of two
    sinc functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
            0.18181818,  0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    # Rising ramp on the first half, mirrored falling ramp on the second.
    rising = 2.0*k/(M-1)
    return where(less_equal(k, (M-1)/2.0), rising, 2.0 - 2.0*k/(M-1))
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine:

    .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
              \\qquad 0 \\leq n \\leq M-1

    Named for Julius von Hann; also known as the Hann window or the
    Cosine Bell.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Examples
    --------
    >>> np.hanning(12)
    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
            0.07937323,  0.        ])
    """
    # Guard clauses for degenerate sizes, matching the other windows.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    theta = 2.0 * pi * arange(0, M) / (M - 1)
    return 0.5 - 0.5 * cos(theta)
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine:

    .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
              \\qquad 0 \\leq n \\leq M-1

    Named for R. W. Hamming; described in Blackman and Tukey, where it
    was recommended for smoothing the truncated autocovariance function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])
    """
    # Guard clauses for degenerate sizes, matching the other windows.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    theta = 2.0 * pi * arange(0, M) / (M - 1)
    return 0.54 - 0.46 * cos(theta)
## Code from cephes for i0
# Chebyshev series coefficients, cephes convention (the T0 coefficient is
# implicitly halved by _chbevl's final 0.5*(b0 - b2) step).
# _i0A is consumed by _i0_1 with series variable x/2 - 2 (used by i0 for
# x <= 8); _i0B is consumed by _i0_2 with series variable 32/x - 2 (used
# by i0 for x > 8).
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]

# Large-argument expansion coefficients; see note above.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]
def _chbevl(x, vals):
    """Evaluate a Chebyshev series at `x` using the Clenshaw recurrence.

    `vals` holds the coefficients in the cephes ordering: the final
    ``0.5*(b0 - b2)`` step implicitly halves the T0 coefficient.

    Parameters
    ----------
    x : scalar or ndarray
        Evaluation point(s), already mapped onto the fit interval.
    vals : sequence of float
        Chebyshev series coefficients.

    Returns
    -------
    scalar or ndarray
        The series evaluated at `x`.
    """
    b0 = vals[0]
    b1 = 0.0
    # b2 must be bound even when len(vals) == 1, otherwise the return
    # expression below would raise UnboundLocalError (the loop is skipped).
    b2 = 0.0
    for coef in vals[1:]:
        b2 = b1
        b1 = b0
        b0 = x*b1 - b2 + coef
    return 0.5*(b0 - b2)
def _i0_1(x):
    # Small-argument branch of i0 (x in [0, 8]): exp(x) times the
    # Chebyshev fit evaluated at x/2 - 2.
    t = x/2.0 - 2
    return exp(x) * _chbevl(t, _i0A)
def _i0_2(x):
    # Large-argument branch of i0 (x > 8): exp(x)/sqrt(x) times the
    # Chebyshev fit evaluated at 32/x - 2.
    t = 32.0/x - 2.0
    return exp(x) * _chbevl(t, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  Evaluated via the two cephes Chebyshev
    expansions (Clenshaw's fits), one for ``|x| <= 8`` and one for
    ``|x| > 8``.  This function does broadcast, but will *not* "up-cast"
    int dtype arguments unless accompanied by at least one float or
    complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements
        of `x`; 0-d inputs are squeezed back to 0-d.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    """
    work = atleast_1d(x).copy()
    negative = work < 0
    work[negative] = -work[negative]  # I0 is even, so evaluate at |x|
    result = empty_like(work)
    small = work <= 8.0
    result[small] = _i0_1(work[small])
    result[~small] = _i0_2(work[~small])
    return result.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function:

    .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
              \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.  ``beta=0`` is rectangular, ~5 is
        similar to a Hamming, ~6 to a Hanning and 8.6 to a Blackman
        window; 14 is a good starting point.  As `beta` grows the window
        narrows, so `M` must be large enough to sample the spike or NaNs
        are returned.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
    """
    # numpy.dual re-exports an accelerated i0 (e.g. SciPy's) when one is
    # available, falling back to the NumPy implementation otherwise.
    # NOTE(review): numpy.dual is deprecated/removed in modern NumPy —
    # confirm the targeted NumPy version still provides it.
    from numpy.dual import i0
    if M == 1:
        return np.array([1.])
    alpha = (M - 1) / 2.0
    ratio = (arange(0, M) - alpha) / alpha
    return i0(beta * sqrt(1 - ratio ** 2.0)) / i0(float(beta))
def sinc(x):
    """
    Return the sinc function, :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.  Internally a negligible dummy
    argument (1e-20) is substituted wherever ``x == 0`` so that
    ``sin(y)/y`` evaluates to 1.0 there without dividing by zero.

    The name sinc is short for "sine cardinal" or "sinus cardinalis".
    It is used in signal processing, e.g. anti-aliasing, Lanczos
    resampling and bandlimited interpolation.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function
    """
    x = np.asanyarray(x)
    # Replace exact zeros by a tiny value: sin(t)/t rounds to exactly 1.0.
    y = where(x == 0, 1.0e-20, x) * pi
    return sin(y) / y
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort
    """
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def _ureduce(a, func, **kwargs):
    """
    Internal Function.

    Call `func` with `a` as first argument, swapping the axes to use an
    extended axis on functions that don't support it natively.
    Returns result and a.shape with axis dims set to 1.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving a single axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.

    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1
        which can be used to reshape the result to the same shape a ufunc with
        keepdims=True would produce.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis', None)
    if axis is not None:
        keepdim = list(a.shape)
        nd = a.ndim
        try:
            # Single integer axis: `func` can handle it directly.
            axis = operator.index(axis)
            if axis >= nd or axis < -nd:
                raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
            keepdim[axis] = 1
        except TypeError:
            # A sequence of axes: validate and normalize them, then
            # collapse every reduced axis into one trailing dimension so
            # that `func` only ever sees axis=-1.
            sax = set()
            for x in axis:
                if x >= nd or x < -nd:
                    raise IndexError("axis %d out of bounds (%d)" % (x, nd))
                if x in sax:
                    raise ValueError("duplicate value in axis")
                sax.add(x % nd)
                keepdim[x] = 1
            # Axes NOT being reduced (complement of the reduced set).
            keep = sax.symmetric_difference(frozenset(range(nd)))
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            kwargs['axis'] = -1
    else:
        # Full reduction: the keepdims shape is all ones.
        keepdim = [1] * a.ndim
    r = func(a, **kwargs)
    return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.  Given a vector V of
    length N, the median is ``V_sorted[(N-1)/2]`` when N is odd, and the
    average of the two middle values of ``V_sorted`` when N is even.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the medians are computed. The default
        (axis=None) computes the median of the flattened array.
        A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow use of memory of input array (a) for
        calculations; the input array will be modified (probably fully
        or partially sorted).  Default is False.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    median : ndarray
        A new array holding the result (unless `out` is specified, in
        which case that array is returned instead). Integer inputs and
        floats of smaller precision than 64 produce a float64 result;
        otherwise the output dtype matches the input.

    See Also
    --------
    mean, percentile

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.median(a, axis=1)
    array([ 7.,  2.])
    """
    result, reduced_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                     overwrite_input=overwrite_input)
    if keepdims:
        result = result.reshape(reduced_shape)
    return result
def _median(a, axis=None, out=None, overwrite_input=False):
    """Compute the median along `axis` using a partial sort (partition).

    Backend for `median`; parameters match `median` minus `keepdims`.
    """
    # can't reasonably be implemented in terms of percentile as we have to
    # call mean to not break astropy
    a = np.asanyarray(a)
    if axis is not None and axis >= a.ndim:
        raise IndexError(
            "axis %d out of bounds (%d)" % (axis, a.ndim))
    if overwrite_input:
        # Partition `a` (or a flattened view of it) in place.
        if axis is None:
            part = a.ravel()
            sz = part.size
            if sz % 2 == 0:
                # Even length: both middle order statistics are needed.
                szh = sz // 2
                part.partition((szh - 1, szh))
            else:
                part.partition((sz - 1) // 2)
        else:
            sz = a.shape[axis]
            if sz % 2 == 0:
                szh = sz // 2
                a.partition((szh - 1, szh), axis=axis)
            else:
                a.partition((sz - 1) // 2, axis=axis)
            part = a
    else:
        # Non-destructive path: partition a copy via the free function.
        if axis is None:
            sz = a.size
        else:
            sz = a.shape[axis]
        if sz % 2 == 0:
            part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
        else:
            part = partition(a, (sz - 1) // 2, axis=axis)
    if part.shape == ():
        # make 0-D arrays work
        return part.item()
    if axis is None:
        axis = 0
    indexer = [slice(None)] * part.ndim
    index = part.shape[axis] // 2
    if part.shape[axis] % 2 == 1:
        # index with slice to allow mean (below) to work
        indexer[axis] = slice(index, index+1)
    else:
        # Even case: select both middle elements; mean averages them.
        indexer[axis] = slice(index-1, index+1)
    # Use mean in odd and even case to coerce data type
    # and check, use out array.
    # NOTE(review): indexing with a list of slices is deprecated in newer
    # NumPy; `part[tuple(indexer)]` is the forward-compatible spelling.
    return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Given a vector V of length N, the q-th percentile of V is the q-th
    ranked value in a sorted copy of V; `interpolation` controls what
    happens when the normalized ranking falls between two data points.
    ``q=50`` matches the median, ``q=0`` the minimum, ``q=100`` the
    maximum.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute which must be between 0 and 100 inclusive.
    axis : int or sequence of int, optional
        Axis along which the percentiles are computed. The default
        (None) uses the flattened array.  A sequence of axes is
        supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow use of memory of input array `a` for
        calculations; the contents of `a` are undefined afterwards.
        Default is False.  Has no effect when `a` is not already an
        ndarray.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation used when the desired quantile lies between two
        data points ``i`` and ``j``:

        * linear: ``i + (j - i) * fraction``
        * lower: ``i``
        * higher: ``j``
        * nearest: ``i`` or ``j`` whichever is nearest
        * midpoint: ``(i + j) / 2``

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        A scalar for a single `q` with axis=None; otherwise an array
        with the results listed in the first axis (or `out`, if given).
        Integer inputs and floats of smaller precision than 64 produce a
        float64 result; otherwise the output dtype matches the input.

    See Also
    --------
    mean, median
    """
    q = array(q, dtype=np.float64, copy=True)
    result, reduced_shape = _ureduce(a, func=_percentile, q=q, axis=axis,
                                     out=out,
                                     overwrite_input=overwrite_input,
                                     interpolation=interpolation)
    if not keepdims:
        return result
    if q.ndim == 0:
        return result.reshape(reduced_shape)
    return result.reshape([len(q)] + reduced_shape)
def _percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
    """Compute percentiles via np.partition; backend for `percentile`.

    `q` must already be a float64 ndarray of percentiles in [0, 100];
    it is normalized to the [0, 1] range in place.
    """
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False

    # avoid expensive reductions, relevant for arrays with < O(1000) elements
    if q.size < 10:
        for i in range(q.size):
            if q[i] < 0. or q[i] > 100.:
                raise ValueError("Percentiles must be in the range [0,100]")
            q[i] /= 100.
    else:
        # faster than any()
        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
            raise ValueError("Percentiles must be in the range [0,100]")
        q /= 100.

    # prepare a for partitioning
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()

    if axis is None:
        axis = 0

    Nx = ap.shape[axis]
    # Fractional positions of the requested percentiles along `axis`.
    indices = q * (Nx - 1)

    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = floor(indices) + 0.5
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")

    if indices.dtype == intp:  # take the points along axis
        ap.partition(indices, axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        axis = 0
        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)
    else:  # weight the points above and below the indices
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        # Clamp so the upper neighbor of the last element is itself.
        indices_above[indices_above > Nx - 1] = Nx - 1

        weights_above = indices - indices_below
        weights_below = 1.0 - weights_above

        # Reshape the weights so they broadcast along `axis` only.
        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape

        # One partition call places every needed order statistic.
        ap.partition(concatenate((indices_below, indices_above)), axis=axis)
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above

        # ensure axis with qth is first
        x1 = np.rollaxis(x1, axis, 0)
        x2 = np.rollaxis(x2, axis, 0)

        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)

        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)

    return r
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        Sample points corresponding to `y`.  If None, the spacing
        between all `y` elements is `dx`.
    dx : scalar, optional
        Spacing between samples when `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate.

    Returns
    -------
    trapz : float
        Definite integral as approximated by the trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # Reshape so the 1-D spacing broadcasts along `axis` of `y`.
            d_shape = [1] * y.ndim
            d_shape[axis] = d.shape[0]
            d = d.reshape(d_shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    upper = [slice(None)] * nd
    lower = [slice(None)] * nd
    upper[axis] = slice(1, None)    # y[1:] along `axis`
    lower[axis] = slice(None, -1)   # y[:-1] along `axis`
    upper = tuple(upper)
    lower = tuple(lower)
    try:
        ret = (d * (y[upper] + y[lower]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. odd subclasses); cast to ndarray.
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[upper] + y[lower]) / 2.0, axis)
    return ret
# Must always succeed: failures are deliberately swallowed (see add_newdoc).
def add_newdoc(place, obj, doc):
    """Add documentation to `obj`, which lives in module `place`.

    Parameters
    ----------
    place : str
        Importable module name containing `obj`.
    obj : str
        Name of the object within `place` to document.
    doc : str or tuple or list
        If a string, attach it to `obj` as its docstring.
        If a 2-tuple ``(attr, docstring)``, attach the docstring to that
        attribute of `obj`.
        If a list, each element must be such a 2-tuple; each pair is
        applied in turn.

    Notes
    -----
    This routine never raises an error; in particular it cannot modify
    read-only docstrings, as appear in new-style classes or built-in
    functions.  Because failures are silent, the caller must check
    manually that the docstrings were changed.
    """
    try:
        new = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(new, doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new, doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new, val[0]), val[1].strip())
    except Exception:
        # Deliberately best-effort: swallow ordinary failures, but —
        # unlike the previous bare `except:` — let KeyboardInterrupt
        # and SystemExit propagate.
        pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional
    coordinate arrays x1, x2,..., xn.

    .. versionchanged:: 1.9
       1-D and 0-D cases are allowed.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        With inputs of length M and N, 'xy' yields (N, M) shaped
        outputs and 'ij' yields (M, N); for lengths M, N, P the shapes
        are (N, M, P) and (M, N, P) respectively.  In the 1-D and 0-D
        case this keyword has no effect.

        .. versionadded:: 1.7.0
    sparse : bool, optional
        If True a sparse (broadcastable) grid is returned in order to
        conserve memory.  Default is False.

        .. versionadded:: 1.7.0
    copy : bool, optional
        If False, views into the original arrays are returned in order
        to conserve memory.  Default is True.  Note that
        ``sparse=False, copy=False`` will likely return non-contiguous
        arrays whose elements may share memory — make copies before
        writing.

        .. versionadded:: 1.7.0

    Returns
    -------
    X1, X2,..., XN : ndarray
        Arrays with the elements of each `xi` repeated to fill the grid
        along its own dimension.

    See Also
    --------
    index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
                     using indexing notation.
    index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
                     using indexing notation.

    Examples
    --------
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = meshgrid(x, y)
    >>> xv
    array([[ 0. ,  0.5,  1. ],
           [ 0. ,  0.5,  1. ]])
    >>> yv
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]])
    """
    ndim = len(xi)
    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')
    if kwargs:
        # Anything left over is not a recognized keyword.
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    # Give the i-th input length -1 in slot i and 1 everywhere else, so
    # the reshaped inputs broadcast against each other.
    output = []
    for i, x in enumerate(xi):
        slot_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)
        output.append(np.asanyarray(x).reshape(slot_shape))

    shape = [arr.size for arr in output]

    if indexing == 'xy' and ndim > 1:
        # Cartesian convention: the first two axes are transposed.
        output[0].shape = (1, -1) + (1,) * (ndim - 2)
        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]

    if sparse:
        return [arr.copy() for arr in output] if copy_ else output

    # Return the full N-D matrices (not only the 1-D vectors).
    if copy_:
        mult_fact = np.ones(shape, dtype=int)
        return [arr * mult_fact for arr in output]
    return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.
    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.
    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.
    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.
    Notes
    -----
    Often it is preferable to use a boolean mask. For example:
    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]
    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.
    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1, 2, 3, 4],
    [ 5, 6, 7, 8],
    [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1, 2, 3, 4],
    [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2, 4],
    [ 6, 8],
    [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
    """
    # Remember how to re-wrap the result for ndarray subclasses (e.g. matrix).
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # No axis: operate on the flattened view, along its only axis.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 0-d input: historically returned a copy; scheduled to become an error.
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from delete and raise an error", DeprecationWarning)
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # Fast path: a slice can be deleted with at most three block copies
        # (before / after / strided middle) and no integer index array.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)
        if numtodel <= 0:
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()
        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1
        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunk
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing: keep everything not hit by the stride.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[slobj2]
            slobj2[axis] = keep
            new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        else:
            return new
    # General path: obj is an int or an array of ints.
    _obj = obj
    obj = np.asarray(obj)
    # After removing the special handling of booleans and out of
    # bounds values, the conversion to the array can be removed.
    if obj.dtype == bool:
        warnings.warn(
            "in the future insert will treat boolean arrays and array-likes "
            "as boolean index instead of casting it to integer", FutureWarning)
        obj = obj.astype(intp)
    if isinstance(_obj, (int, long, integer)):
        # optimization for a single value: delete via two block copies.
        obj = obj.item()
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    else:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            obj = obj.astype(intp)
        if not np.can_cast(obj, intp, 'same_kind'):
            # obj.size = 1 special case always failed and would just
            # give superfluous warnings.
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
                "error in the future", DeprecationWarning)
            obj = obj.astype(intp)
        # Build a boolean keep-mask and select in one fancy-indexing pass.
        keep = ones(N, dtype=bool)
        # Test if there are out of bound indices, this is deprecated
        inside_bounds = (obj < N) & (obj >= -N)
        if not inside_bounds.all():
            warnings.warn(
                "in the future out of bounds indices will raise an error "
                "instead of being ignored by `numpy.delete`.",
                DeprecationWarning)
            obj = obj[inside_bounds]
        positive_indices = obj >= 0
        if not positive_indices.all():
            warnings.warn(
                "in the future negative indices will not be ignored by "
                "`numpy.delete`.", FutureWarning)
            obj = obj[positive_indices]
        keep[obj, ] = False
        slobj[axis] = keep
        new = arr[slobj]
    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.
    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.
        .. versionadded:: 1.8.0
        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.
    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.
    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays together.
    delete : Delete elements from an array.
    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.
    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1, 1],
    [2, 2],
    [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
    [2, 5, 2],
    [3, 5, 3]])
    Difference between sequence and scalars:
    >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
    array([[1, 1, 1],
    [2, 2, 2],
    [3, 3, 3]])
    >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
    ...                np.insert(a, [1], [[1],[2],[3]], axis=1))
    True
    >>> b = a.flatten()
    >>> b
    array([1, 1, 2, 2, 3, 3])
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])
    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])
    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, 0, 2, 2, 3, 3])
    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[ 0, 999, 1, 2, 999, 3],
    [ 4, 999, 5, 6, 999, 7]])
    """
    # Remember how to re-wrap the result for ndarray subclasses (e.g. matrix).
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # No axis: operate on the flattened view, along its only axis.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    else:
        if ndim > 0 and (axis < -ndim or axis >= ndim):
            raise IndexError(
                "axis %i is out of bounds for an array of "
                "dimension %i" % (axis, ndim))
        if (axis < 0):
            axis += ndim
    if (ndim == 0):
        # 0-d input: historically overwrote in a copy; scheduled to error out.
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from insert and raise an error", DeprecationWarning)
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), **{'dtype': intp})
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        # optimization for a single insertion point: three block copies.
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (index < 0):
            index += N
        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        slobj[axis] = slice(None, index)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(index, index+numnew)
        new[slobj] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)
    if not np.can_cast(indices, intp, 'same_kind'):
        warnings.warn(
            "using a non-integer array as obj in insert will result in an "
            "error in the future", DeprecationWarning)
        indices = indices.astype(intp)
    # Multiple insertion points: shift each index so later insertions account
    # for the elements inserted before them, then scatter via a boolean mask.
    indices[indices < 0] += N
    numnew = len(indices)
    order = indices.argsort(kind='mergesort')  # stable sort
    indices[order] += np.arange(numnew)
    newshape[axis] += numnew
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False
    new = empty(newshape, arr.dtype, arr.flags.fnc)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[slobj] = values
    new[slobj2] = arr
    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.
    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`. It must be of the
        correct shape (the same shape as `arr`, excluding `axis`). If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended. If `axis` is not
        given, both `arr` and `values` are flattened before use.
    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`. Note that
        `append` does not occur in-place: a new array is allocated and
        filled. If `axis` is None, `out` is a flattened array.
    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.
    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    When `axis` is specified, `values` must have the correct shape.
    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    arr = asanyarray(arr)
    # With an explicit axis both operands must already be shape-compatible;
    # simply concatenate along it.
    if axis is not None:
        return concatenate((arr, values), axis=axis)
    # Flattened mode: collapse both operands to 1-D and join end to end.
    if arr.ndim != 1:
        arr = arr.ravel()
    return concatenate((arr, ravel(values)), axis=arr.ndim - 1)
| mit |
cdegroc/scikit-learn | examples/plot_permutation_test_for_classification.py | 5 | 2319 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significative a technique
in repeating the classification procedure after randomizing, permuting,
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD
# NOTE(review): this example uses Python 2 print statements and the legacy
# sklearn.cross_validation / zero_one_score APIs; it will not run unmodified
# on Python 3 / modern scikit-learn.
print __doc__
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
from sklearn.metrics import zero_one_score
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
# Score the classifier on the true labels, then on 100 label permutations;
# the p-value is the fraction of permutations scoring at least as well.
score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
                                                           zero_one_score, cv=cv,
                                                           n_permutations=100, n_jobs=1)
print "Classification score %s (pvalue : %s)" % (score, pvalue)
###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#pl.vlines(score, ylim[0], ylim[1], linestyle='--',
#          color='g', linewidth=3, label='Classification Score'
#          ' (pvalue %s)' % pvalue)
#pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
#          color='k', linewidth=3, label='Luck')
pl.plot(2 * [score], ylim, '--g', linewidth=3,
        label='Classification Score'
        ' (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause |
rubind/SimpleBayesJLA | plot_cosmo.py | 1 | 2375 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import pickle
from IPython import embed
plt.rcParams["font.family"] = "serif"  # use a serif font for all figure text
def reflect(samps, othersamps=None, reflect_cut=0.2):
    """
    Mirror-pad a 1-D sample set about its extrema.

    Samples within ``reflect_cut`` of the sample range's minimum (resp.
    maximum) are reflected below the minimum (resp. above the maximum) and
    appended, which reduces boundary bias when the result is fed to a KDE.

    Parameters
    ----------
    samps : array_like
        Samples to pad.
    othersamps : array_like, optional
        A second sample set paired index-by-index with `samps`; the entries
        paired with reflected samples are duplicated (not mirrored).
    reflect_cut : float, optional
        Fraction of the sample range treated as the boundary region.

    Returns
    -------
    pad_samples : ndarray
        `samps` followed by the reflected boundary samples.
    pad_other : ndarray
        Only returned when `othersamps` is given.
    """
    samps = np.asarray(samps)
    the_min = min(samps)
    the_max = max(samps)
    # Lower tail: samples strictly between the minimum and the lower cut are
    # mirrored to the other side of the minimum.
    inds = np.where((samps < the_min*(1. - reflect_cut) + the_max*reflect_cut) & (samps > the_min))
    pad_samples = np.concatenate((samps, the_min - (samps[inds] - the_min)))
    # BUG FIX: was ``othersamps != None`` -- on an ndarray that compares
    # elementwise and is ambiguous in a boolean context; use an identity test.
    if othersamps is not None:
        othersamps = np.asarray(othersamps)
        pad_other = np.concatenate((othersamps, othersamps[inds]))
    # Upper tail: mirror samples above the maximum.
    inds = np.where((samps > the_min*reflect_cut + the_max*(1. - reflect_cut)) & (samps < the_max))
    pad_samples = np.concatenate((pad_samples, the_max + (the_max - samps[inds])))
    if othersamps is not None:
        pad_other = np.concatenate((pad_other, othersamps[inds]))
        return pad_samples, pad_other
    # BUG FIX: the original unconditionally returned (pad_samples, pad_other),
    # which raised NameError when othersamps was omitted; the bare return
    # below was unreachable.
    return pad_samples
def reflect_2D(samps1, samps2, reflect_cut = 0.2):
    """Mirror-pad two coupled 1-D sample sets, once along each variable."""
    first_pass_1, first_pass_2 = reflect(samps1, samps2, reflect_cut = reflect_cut)
    # Second pass pads along the other variable, carrying the first padding.
    second_pass_2, second_pass_1 = reflect(first_pass_2, first_pass_1, reflect_cut = reflect_cut)
    return second_pass_1, second_pass_2
def every_other_tick(ticks):
    """Blank every other tick label (and always the final one) to declutter
    a crowded matplotlib axis; keeps the same list length as ``ticks``."""
    parity = len(ticks) % 2
    labels = [tick if position % 2 == parity else ""
              for position, tick in enumerate(ticks[:-1])]
    labels.append("")
    return labels
# 1-sigma and 2-sigma two-sided tail probabilities (68.27% / 95.45% enclosed).
contours = [0.317311, 0.0455003]
grayscales = np.linspace(0.8, 0.4, len(contours))
colors = [[item]*3 for item in grayscales]  # one gray RGB triple per level
# NOTE(review): assumes results.pickle holds sampling output with 'Om'/'OL'
# keys at index 1 -- confirm against the producer of this file.
samples = pickle.load(open('./results.pickle', 'rb'))
om = samples[1]['Om']
ol = samples[1]['OL']
# Mirror-pad the samples so the KDE does not decay artificially at the edges.
pad_om, pad_ol = reflect_2D(om, ol)
kernel = gaussian_kde(np.array([pad_om, pad_ol]), bw_method=0.1)
# Evaluate the KDE on a 100x100 grid spanning the (unpadded) sample range.
xvals, yvals = np.meshgrid(np.linspace(min(om), max(om), 100), np.linspace(min(ol), max(ol), 100))
eval_points = np.array([xvals.reshape(10000), yvals.reshape(10000)])
kernel_eval = kernel(eval_points)
kernel_eval /= kernel_eval.sum()
kernel_sort = np.sort(kernel_eval)
kernel_eval = np.reshape(kernel_eval, (100, 100))
kernel_cum = np.cumsum(kernel_sort)
# Density thresholds whose enclosed probability mass matches each contour.
levels = [kernel_sort[np.argmin(abs(kernel_cum - item))] for item in contours[::-1]]
ax = plt.axes()
ax.contourf(xvals, yvals, kernel_eval, levels = levels + [1], colors = colors)
ax.contour(xvals, yvals, kernel_eval, levels = levels, colors = 'k')
plt.show()
embed() | mit |
aminert/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
    # Validate BinomialDeviance against the closed-form expressions in ESLII.
    bd = BinomialDeviance(2)

    def alt_deviance(y, pred):
        # Alternative deviance definition: mean log(1 + exp(-2(2y-1)f)).
        return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred))

    def alt_neg_gradient(y, pred):
        # Alternative negative-gradient definition from the same reference.
        return (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))

    # A zero prediction scores the same deviance for either label.
    assert_equal(bd(np.array([0.0]), np.array([0.0])),
                 bd(np.array([1.0]), np.array([0.0])))
    # Extremely confident, correct predictions drive the deviance to zero.
    assert_almost_equal(
        bd(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), 0.0)
    assert_almost_equal(
        bd(np.array([1.0, 0.0, 0.0]), np.array([100.0, -100.0, -100.0])), 0)
    cases = [
        (np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
        (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
        (np.array([0.0, 0.0, 0.0]), np.array([-100.0, -100.0, -100.0])),
        (np.array([1.0, 1.0, 1.0]), np.array([-100.0, -100.0, -100.0])),
    ]
    # check if same results as alternative definition of deviance (from ESLII)
    for labels, raw_pred in cases:
        assert_almost_equal(bd(labels, raw_pred), alt_deviance(labels, raw_pred))
    # check the gradient against the alternative formulation
    for labels, raw_pred in cases:
        assert_almost_equal(bd.negative_gradient(labels, raw_pred),
                            alt_neg_gradient(labels, raw_pred))
def test_log_odds_estimator():
    # LogOddsEstimator: single-class input is rejected; a balanced input
    # yields a zero prior and zero predictions.
    estimator = LogOddsEstimator()
    assert_raises(ValueError, estimator.fit, None, np.array([1]))
    balanced_labels = np.array([1.0, 0.0])
    estimator.fit(None, balanced_labels)
    assert_equal(estimator.prior, 0.0)
    predictions = estimator.predict(np.array([[1.0], [1.0]]))
    assert_array_equal(predictions, np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
    # Unit sample weights must not change the least-squares loss value.
    rng = check_random_state(13)
    targets = rng.rand(100)
    predictions = rng.rand(100)
    loss = LeastSquaresError(1)
    unweighted = loss(targets, predictions)
    unit_weights = np.ones(predictions.shape[0], dtype=np.float32)
    weighted = loss(targets, predictions, unit_weights)
    assert_almost_equal(unweighted, weighted)
def test_sample_weight_init_estimators():
    # Smoke test for init estimators with sample weights: fitting with unit
    # weights must give the same predictions as fitting without weights.
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y
        if Loss.is_multi_class:
            # skip multiclass
            continue
        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))
        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        # BUG FIX: previously predicted with ``init_est`` again, so the
        # weight-fitted estimator was never exercised and the comparison
        # below was trivially true.
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))
        # check if predictions match
        assert_array_equal(out, sw_out)
def test_weighted_percentile():
    # The weighted median must ignore the huge outlier whose weight is zero.
    # ``np.float`` was a deprecated alias of the builtin ``float`` and was
    # removed in NumPy 1.24; ``np.float64`` is the same dtype.
    y = np.empty(102, dtype=np.float64)
    y[:50] = 0
    y[-51:] = 2
    y[-1] = 100000
    y[50] = 1
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 1
def test_weighted_percentile_equal():
    # All-equal data: the weighted median is that value, regardless of the
    # zero weight on the last sample.
    y = np.empty(102, dtype=np.float64)  # np.float alias removed in NumPy 1.24
    y.fill(0.0)
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 0
def test_weighted_percentile_zero_weight():
    # Degenerate case: all weights are zero; the percentile of constant data
    # is still that constant.
    y = np.empty(102, dtype=np.float64)  # np.float alias removed in NumPy 1.24
    y.fill(1.0)
    sw = np.ones(102, dtype=np.float64)
    sw.fill(0.0)
    score = _weighted_percentile(y, sw, 50)
    assert score == 1.0
def test_sample_weight_deviance():
    # Test if deviance supports sample weights.
    # For every loss, the deviance with unit weights must equal the
    # unweighted deviance exactly.
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)
    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
            p = reg_y
        else:
            k = 2
            y = clf_y
            p = clf_y
            if Loss.is_multi_class:
                k = 3
                y = mclf_y
                # one-hot encoding
                p = np.zeros((y.shape[0], k), dtype=np.float64)
                for i in range(k):
                    p[:, i] = y == i
        loss = Loss(k)
        deviance_w_w = loss(y, p, sample_weight)
        deviance_wo_w = loss(y, p)
        assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
idbedead/RNA-sequence-tools | make_geo_list2.py | 2 | 2850 | import os
import pandas as pd
import shutil
import sys
import hashlib
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, streamed in
    4 KiB chunks so arbitrarily large files fit in constant memory."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
# Source directories holding the renamed per-cell FASTQ folders.
pats = ['/Volumes/Seq_data/09142015_BU3_ips17_raw-data/BU3_fastq_renamed']
# Whitespace-delimited sample sheet with a SampleID column listing cells to keep.
keep_file = '/Volumes/Drobo/Seq_data/rsem_results_h38_ips17_BU3/Samplelist_cells_new.txt'
# Tab-delimited Picard insert-size metrics table, indexed by sample.
insert_size_file = '/Volumes/Seq_data/rsem_results_h38_ips17_BU3_insert_metrics_picard_insert_metrics.txt'
new_filename = 'ips17_BU3_hg19_rsem_geo'  # name of the output (GEO submission) folder
nextera_barcode_v = 1  # NOTE(review): unused in the code shown here -- confirm before removing
py3 = sys.version_info[0] > 2
try:
    new_filename = os.path.join(os.path.dirname(pats[0]),new_filename)
    print("new_filename", new_filename)
    os.mkdir(new_filename)
except FileExistsError:
    # Output folder already exists: ask before re-copying into it.
    if py3:
        response = input("New file already exists. Do you still want to continue (Y/N): ")
    else:
        response = raw_input("New file already exists. Do you still want to continue (Y/N): ")
    if response != 'Y':
        sys.exit("Canceled, please rerun with different new_filename.")
keep_df = pd.read_table(open(keep_file,'rU'), sep='\s+', engine='python')
insert_df = pd.read_table(open(insert_size_file,'rU'), sep='\t', engine='python', index_col=0)
samples_to_keep= keep_df["SampleID"].tolist()
#cells_to_keep = [x.split('_')[-2] for x in samples_to_keep]
sample_list_r1 = []
sample_list_r2 = []
md5_r1 = []
md5_r2 = []
sample_folder = []
index_list = []
# Copy every kept cell's fastq.gz files into the submission folder and record
# paired-end file names plus their MD5 checksums.
for path_to_file in pats:
    for root, dirnames, filenames in os.walk(path_to_file):
        for f in filenames:
            if 'fastq.gz' in f:
                cell_num = os.path.basename(root)
                if cell_num in samples_to_keep:
                    new_name = os.path.basename(root)
                    try:
                        shutil.copy2(root+'/'+f,new_filename)
                    except FileExistsError:
                        print("alread moved "+new_name)
                    if "R1" in f:
                        sample_list_r1.append(f)
                        sample_folder.append(new_filename+'/'+new_name)
                        md5_r1.append(md5(os.path.join(new_filename,f)))
                    elif "R2" in f:
                        sample_list_r2.append(f)
                        index_list.append(cell_num)
                        md5_r2.append(md5(os.path.join(new_filename,f)))
# Assemble the per-sample manifest and join in the Picard insert-size stats.
sample_df = pd.DataFrame.from_dict({'index':index_list,'R1_md5':md5_r1, 'R2_md5':md5_r2, 'folder':sample_folder,'PE1':sample_list_r1, 'PE2':sample_list_r2})
sample_df.set_index('index', inplace=True)
filter_insert_df = insert_df.loc[index_list]
all_df = pd.concat([sample_df,filter_insert_df[['MEAN_INSERT_SIZE','STANDARD_DEVIATION']]], axis=1)
all_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_samples.txt'), sep='\t')
| mit |
jreback/pandas | pandas/compat/pickle_compat.py | 1 | 7903 | """
Support pre-0.12 series pickle compatibility.
"""
import contextlib
import copy
import io
import pickle as pkl
from typing import TYPE_CHECKING, Optional
import warnings
from pandas._libs.tslibs import BaseOffset
from pandas import Index
if TYPE_CHECKING:
from pandas import DataFrame, Series
def load_reduce(self):
    """Replacement REDUCE handler: call the reconstructor on top of the
    pickle stack, papering over historical pandas/NumPy API changes."""
    stack = self.stack
    args = stack.pop()
    func = stack[-1]
    if len(args) and type(args[0]) is type:
        n = args[0].__name__  # noqa
    try:
        stack[-1] = func(*args)
        return
    except TypeError as err:
        # If we have a deprecated function,
        # try to replace and try again.
        msg = "_reconstruct: First argument must be a sub-type of ndarray"
        if msg in str(err):
            # Old ndarray-subclass reconstructors: build a bare instance and
            # let subsequent pickle opcodes fill in the state.
            try:
                cls = args[0]
                stack[-1] = object.__new__(cls)
                return
            except TypeError:
                pass
        elif args and issubclass(args[0], BaseOffset):
            # TypeError: object.__new__(Day) is not safe, use Day.__new__()
            cls = args[0]
            stack[-1] = cls.__new__(*args)
            return
        raise
# Template for the FutureWarning raised when unpickling removed sparse
# classes; formatted with the legacy class name ({cls}) and its replacement
# ({new}).
_sparse_msg = """\
Loading a saved '{cls}' as a {new} with sparse values.
'{cls}' is now removed. You should re-save this dataset in its new format.
"""
class _LoadSparseSeries:
    """Unpickling shim that materializes a legacy SparseSeries pickle as a
    plain (empty, object-dtype) Series, warning about the removed class."""
    # https://github.com/python/mypy/issues/1020
    # error: Incompatible return type for "__new__" (returns "Series", but must return
    # a subtype of "_LoadSparseSeries")
    def __new__(cls) -> "Series":  # type: ignore[misc]
        from pandas import Series
        message = _sparse_msg.format(cls="SparseSeries", new="Series")
        warnings.warn(message, FutureWarning, stacklevel=6)
        return Series(dtype=object)
class _LoadSparseFrame:
    """Unpickling shim that materializes a legacy SparseDataFrame pickle as a
    plain empty DataFrame, warning about the removed class."""
    # https://github.com/python/mypy/issues/1020
    # error: Incompatible return type for "__new__" (returns "DataFrame", but must
    # return a subtype of "_LoadSparseFrame")
    def __new__(cls) -> "DataFrame":  # type: ignore[misc]
        from pandas import DataFrame
        message = _sparse_msg.format(cls="SparseDataFrame", new="DataFrame")
        warnings.warn(message, FutureWarning, stacklevel=6)
        return DataFrame()
# If classes are moved, provide compat here.
# Keys are the (module, qualname) pairs stored in old pickles; values are the
# locations Unpickler.find_class should resolve them to today.  Numeric
# comments reference the pandas GitHub issue that moved each class.
_class_locations_map = {
    ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
    # 15477
    ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
    ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
    ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
    # 10890
    ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
    ("pandas.sparse.series", "SparseTimeSeries"): (
        "pandas.core.sparse.series",
        "SparseSeries",
    ),
    # 12588, extensions moving
    ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
    ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
    # 18543 moving period
    ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
    ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
    # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
    ("pandas.tslib", "__nat_unpickle"): (
        "pandas._libs.tslibs.nattype",
        "__nat_unpickle",
    ),
    ("pandas._libs.tslib", "__nat_unpickle"): (
        "pandas._libs.tslibs.nattype",
        "__nat_unpickle",
    ),
    # 15998 top-level dirs moving
    ("pandas.sparse.array", "SparseArray"): (
        "pandas.core.arrays.sparse",
        "SparseArray",
    ),
    ("pandas.sparse.series", "SparseSeries"): (
        "pandas.compat.pickle_compat",
        "_LoadSparseSeries",
    ),
    ("pandas.sparse.frame", "SparseDataFrame"): (
        "pandas.core.sparse.frame",
        "_LoadSparseFrame",
    ),
    ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
    ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
    ("pandas.indexes.numeric", "Int64Index"): (
        "pandas.core.indexes.numeric",
        "Int64Index",
    ),
    ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
    ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
    ("pandas.tseries.index", "_new_DatetimeIndex"): (
        "pandas.core.indexes.datetimes",
        "_new_DatetimeIndex",
    ),
    ("pandas.tseries.index", "DatetimeIndex"): (
        "pandas.core.indexes.datetimes",
        "DatetimeIndex",
    ),
    ("pandas.tseries.period", "PeriodIndex"): (
        "pandas.core.indexes.period",
        "PeriodIndex",
    ),
    # 19269, arrays moving
    ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
    # 19939, add timedeltaindex, float64index compat from 15998 move
    ("pandas.tseries.tdi", "TimedeltaIndex"): (
        "pandas.core.indexes.timedeltas",
        "TimedeltaIndex",
    ),
    ("pandas.indexes.numeric", "Float64Index"): (
        "pandas.core.indexes.numeric",
        "Float64Index",
    ),
    ("pandas.core.sparse.series", "SparseSeries"): (
        "pandas.compat.pickle_compat",
        "_LoadSparseSeries",
    ),
    ("pandas.core.sparse.frame", "SparseDataFrame"): (
        "pandas.compat.pickle_compat",
        "_LoadSparseFrame",
    ),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat and uses a non-public class of the pickle module.
# error: Name 'pkl._Unpickler' is not defined
class Unpickler(pkl._Unpickler):  # type: ignore[name-defined]
    def find_class(self, module, name):
        """Resolve (module, name), remapping classes pandas has relocated."""
        # override superclass
        key = (module, name)
        module, name = _class_locations_map.get(key, key)
        return super().find_class(module, name)
# Copy the dispatch table so the patched opcodes below do not leak back into
# the shared pkl._Unpickler table.
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
    """NEWOBJ opcode handler: construct the class on top of the stack,
    special-casing pandas Index subclasses which must be created empty and
    filled by later pickle opcodes."""
    ctor_args = self.stack.pop()
    target_cls = self.stack[-1]
    if issubclass(target_cls, Index):
        instance = object.__new__(target_cls)
    else:
        instance = target_cls.__new__(target_cls, *ctor_args)
    self.stack[-1] = instance
# Route the NEWOBJ opcode through the compatibility handler above.
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
    """NEWOBJ_EX opcode handler (pickle protocol 4): like load_newobj but
    with keyword arguments, and the same pandas Index special case."""
    keyword_args = self.stack.pop()
    positional_args = self.stack.pop()
    target_cls = self.stack.pop()
    if issubclass(target_cls, Index):
        instance = object.__new__(target_cls)
    else:
        instance = target_cls.__new__(target_cls, *positional_args, **keyword_args)
    self.append(instance)
# NEWOBJ_EX arrived with pickle protocol 4; tolerate pickle modules that
# predate it.
try:
    Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
    pass
def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
    """
    Unpickle the contents of *fh* with the compatibility Unpickler.
    Parameters
    ----------
    fh : a filelike object
    encoding : an optional encoding
    is_verbose : show exception output
    """
    try:
        fh.seek(0)
        # Only forward ``encoding`` when the caller supplied one, so the
        # Unpickler default applies otherwise.
        extra = {} if encoding is None else {"encoding": encoding}
        unpickler = Unpickler(fh, **extra)
        unpickler.is_verbose = is_verbose
        return unpickler.load()
    except (ValueError, TypeError):
        raise
def loads(
    bytes_object: bytes,
    *,
    fix_imports: bool = True,
    encoding: str = "ASCII",
    errors: str = "strict",
):
    """
    Deserialize *bytes_object* with the compatibility Unpickler; mirrors the
    signature of ``pickle.loads``.
    """
    buffer = io.BytesIO(bytes_object)
    unpickler = Unpickler(
        buffer, fix_imports=fix_imports, encoding=encoding, errors=errors
    )
    return unpickler.load()
@contextlib.contextmanager
def patch_pickle():
    """
    Context manager that swaps ``pickle.loads`` for the compatibility
    ``loads`` inside the ``with`` block, restoring the original on exit
    even when an exception is raised.
    """
    original_loads = pkl.loads
    try:
        setattr(pkl, "loads", loads)
        yield
    finally:
        setattr(pkl, "loads", original_loads)
| bsd-3-clause |
Elarnon/mangaki | mangaki/mangaki/utils/svd.py | 2 | 5410 | from django.contrib.auth.models import User
from mangaki.models import Rating, Work, Recommendation
from mangaki.utils.chrono import Chrono
from mangaki.utils.values import rating_values
from scipy.sparse import lil_matrix
from sklearn.utils.extmath import randomized_svd
import numpy as np
from django.db import connection
import pickle
import json
import math
NB_COMPONENTS = 10  # rank kept by the truncated randomized SVD in fit()
TOP = 10  # presumably the number of recommendations to surface -- usage not shown here
class MangakiSVD(object):
    """SVD-based collaborative-filtering recommender over Mangaki ratings."""
    # Factorization state, populated by fit() or load():
    M = None  # sparse user x work rating matrix (scipy lil_matrix)
    U = None  # left singular vectors from randomized_svd
    sigma = None  # singular values
    VT = None  # right singular vectors
    chrono = None  # timing helper for the pipeline stages
    inv_work = None  # work_id -> column index in M
    inv_user = None  # user_id -> row index in M
    work_titles = None  # work_id -> title
    def __init__(self):
        # Chrono(True): verbose stage timing.
        self.chrono = Chrono(True)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
def load(self, filename):
with open(filename, 'rb') as f:
backup = pickle.load(f)
self.M = backup.M
self.U = backup.U
self.sigma = backup.sigma
self.VT = backup.VT
self.inv_work = backup.inv_work
self.inv_user = backup.inv_user
self.work_titles = backup.work_titles
def fit(self, X, y):
self.work_titles = {}
for work in Work.objects.values('id', 'title'):
self.work_titles[work['id']] = work['title']
work_ids = list(Rating.objects.values_list('work_id', flat=True).distinct())
nb_works = len(work_ids)
self.inv_work = {work_ids[i]: i for i in range(nb_works)}
user_ids = list(User.objects.values_list('id', flat=True))
nb_users = len(user_ids)
self.inv_user = {user_ids[i]: i for i in range(nb_users)}
self.chrono.save('get_work_ids')
# print("Computing M: (%i × %i)" % (nb_users, nb_works))
self.M = lil_matrix((nb_users, nb_works))
"""ratings_of = {}
for (user_id, work_id), rating in zip(X, y):
ratings_of.setdefault(user_id, []).append(rating)"""
for (user_id, work_id), rating in zip(X, y):
self.M[self.inv_user[user_id], self.inv_work[work_id]] = rating #- np.mean(ratings_of[user_id])
# np.save('backupM', self.M)
self.chrono.save('fill matrix')
# Ranking computation
self.U, self.sigma, self.VT = randomized_svd(self.M, NB_COMPONENTS, n_iter=3, random_state=42)
# print('Formes', self.U.shape, self.sigma.shape, self.VT.shape)
self.save('backup.pickle')
self.chrono.save('factor matrix')
def predict(self, X):
y = []
for user_id, work_id in X:
i = self.inv_user[user_id]
j = self.inv_work[work_id]
y.append(self.U[i].dot(np.diag(self.sigma)).dot(self.VT.transpose()[j]))
return np.array(y)
def get_reco(self, username, sending=False):
target_user = User.objects.get(username=username)
the_user_id = target_user.id
svd_user = User.objects.get(username='svd')
work_ids = {self.inv_work[work_id]: work_id for work_id in self.inv_work}
nb_works = len(work_ids)
seen_works = set(Rating.objects.filter(user__id=the_user_id).exclude(choice='willsee').values_list('work_id', flat=True))
the_i = self.inv_user[the_user_id]
self.chrono.save('get_seen_works')
print('mon vecteur (taille %d)' % len(self.U[the_i]), self.U[the_i])
print(self.sigma)
for i, line in enumerate(self.VT):
print('=> Ligne %d' % (i + 1), '(ma note : %f)' % self.U[the_i][i])
sorted_line = sorted((line[j], self.work_titles[work_ids[j]]) for j in range(nb_works))[::-1]
top5 = sorted_line[:10]
bottom5 = sorted_line[-10:]
for anime in top5:
print(anime)
for anime in bottom5:
print(anime)
"""if i == 0 or i == 1: # First two vectors explaining variance
with open('vector%d.json' % (i + 1), 'w') as f:
vi = X.dot(line).tolist()
x_norm = [np.dot(X.data[k], X.data[k]) / (nb_works + 1) for k in range(nb_users + 1)]
f.write(json.dumps({'v': [v / math.sqrt(x_norm[k]) if x_norm[k] != 0 else float('inf') for k, v in enumerate(vi)]}))"""
# print(VT.dot(VT.transpose()))
# return
the_ratings = self.predict((the_user_id, work_ids[j]) for j in range(nb_works))
ranking = sorted(zip(the_ratings, [(work_ids[j], self.work_titles[work_ids[j]]) for j in range(nb_works)]), reverse=True)
# Summarize the results of the ranking for the_user_id:
# “=> rank, title, score”
c = 0
for i, (rating, (work_id, title)) in enumerate(ranking, start=1):
if work_id not in seen_works:
print('=>', i, title, rating, self.predict([(the_user_id, work_id)]))
if Recommendation.objects.filter(user=svd_user, target_user__id=the_user_id, work__id=work_id).count() == 0:
Recommendation.objects.create(user=svd_user, target_user_id=the_user_id, work_id=work_id)
c += 1
elif i < TOP:
print(i, title, rating)
if c >= TOP:
break
"""print(len(connection.queries), 'queries')
for line in connection.queries:
print(line)"""
self.chrono.save('complete')
def __str__(self):
return '[SVD]'
def get_shortname(self):
return 'svd'
| agpl-3.0 |
JeanKossaifi/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instanciate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
berkeley-stat159/project-zeta | code/linear_model_scripts_sub4.py | 3 | 25730 | # Goal for this scripts:
#
# Perform linear regression and analyze the similarity in terms of the activated brain area when recognizing different
# objects in odd and even runs of subject 1
# Load required function and modules:
from __future__ import print_function, division
import numpy as np
import numpy.linalg as npl
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
import os
import re
import json
import nibabel as nib
from utils import subject_class as sc
from utils import outlier
from utils import diagnostics as diagnos
from utils import get_object_neural as neural
from utils import stimuli
from utils import convolution as convol
from utils import smooth as sm
from utils import linear_model as lm
from utils import maskfunc as msk
from utils import affine
import copy
# important path:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
# where to store figures
figure_path = os.path.join(base_path, "code", "images", "")
# where to store txt files
file_path = os.path.join(base_path, "code", "txt", "")
# help to make directory to save figures and txt files
# if figure folder doesn't exist -> make it
if not os.path.exists(figure_path):
os.makedirs(figure_path)
# if txt folder doesn't exist -> make it
if not os.path.exists(file_path):
os.makedirs(file_path)
# color display:
# list of all objects in this study in alphabetical order
object_list = ["bottle", "cat", "chair", "face", "house", "scissors", "scrambledpix", "shoe"]
# assign color for task time course for each object
color_list_s = ["b", "g", "r", "c", "m", "y", "k", "sienna"]
match_color_s = dict(zip(object_list, color_list_s))
# assign color for convolved result for each object
color_list_c = ["royalblue", "darksage", "tomato", "cadetblue", "orchid", "goldenrod", "dimgrey", "sandybrown"]
match_color_c = dict(zip(object_list, color_list_c))
# color for showing beta values
nice_cmap_values = np.loadtxt(file_path + 'actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# assign object parameter number: each object has a iterable number
match_para = dict(zip(object_list, range(8)))
# check slice number:
# this is the specific slice we use to run 2D correlation
slice_number = 39
# separator for better report display
sec_separator = '#' * 80
separator = "-" * 80
# which subject to work on?
subid = "sub004"
# work on results from this subject:
################################### START #####################################
print (sec_separator)
print ("Project-Zeta: use linear regression to study ds105 dataset")
print (separator)
print ("Focus on %s for the analysis" % subid)
print (sec_separator)
print ("Progress: Clean up data")
print (separator)
# load important data for this subject by using subject_class
sub = sc.subject(subid)
# get image files of this subject:
sub_img = sub.run_img_result
# get run numbers of this subject:
run_num = len(sub.run_keys)
# report keys of all images:
print ("Import %s images" % subid)
print (separator)
print ("These images are imported:")
img_key = sub_img.keys()
img_key = sorted(img_key)
for i in img_key:
print (i)
# report how many runs in this subject
print ("There are %d runs for %s" % (run_num, subid))
print (separator)
# get data for those figures
print ("Get data from images...")
sub_data = {}
for key, img in sub_img.items():
sub_data[key] = img.get_data()
print ("Complete!")
print (separator)
# use rms_diff to check outlier for all runs of this subject
print ("Analyze outliers in these runs:")
for key, data in sub_data.items():
rms_diff = diagnos.vol_rms_diff(data)
# get outlier indices and the threshold for the outlier
rms_outlier_indices, rms_thresh = diagnos.iqr_outliers(rms_diff)
y_value2 = [rms_diff[i] for i in rms_outlier_indices]
# create figures to show the outlier
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.55, 0.75])
ax.plot(rms_diff, label='rms')
ax.plot([0, len(rms_diff)], [rms_thresh[1], rms_thresh[1]], "k--",\
label='high threshold', color='m')
ax.plot([0, len(rms_diff)], [rms_thresh[0], rms_thresh[0]], "k--",\
label='low threshold', color='c')
ax.plot(rms_outlier_indices, y_value2, 'o', color='g', label='outlier')
# label the figure
ax.set_xlabel('Scan time course')
ax.set_ylabel('Volumne RMS difference')
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., numpoints=1)
fig.text(0.05, 0.9, 'Volume RMS Difference with Outliers for %s' % key, weight='bold')
# save figure
fig.savefig(figure_path + 'Volume_RMS_Difference_Outliers_%s.png' % key)
# clear figure
fig.clf()
# close pyplot window
plt.close()
# report
print ("Outlier analysis results are saved as figures!")
print (separator)
# remove outlier from images
sub_clean_img, outlier_index = outlier.remove_data_outlier(sub_img)
print ("Remove outlier:")
print ("outliers are removed from each run!")
print (sec_separator)
# run generate predicted bold signals:
print ("Progress: create predicted BOLD signals based on condition files")
# get general all tr times == 121*2.5 = about 300 s
# this is the x-axis to plot hemodynamic prediction
all_tr_times = np.arange(sub.BOLD_shape[-1]) * sub.TR
# the y-axis to plot hemodynamic prediction is the neural value from condition (on-off)
sub_neural = neural.get_object_neural(sub.sub_id, sub.conditions, sub.TR, sub.BOLD_shape[-1])
# report info for all run details
print ("The detailed run info for %s:" % subid)
neural_key = sub_neural.keys()
neural_key = sorted(neural_key)
for i in neural_key:
print (i)
print (separator)
# get task time course for all runs -> save as images
print ("generate task time course images")
print (separator)
for run in range(1, run_num):
# make plots to display task time course
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.55, 0.75])
for item in object_list:
check_key = "run0%02d-%s" % (run, item)
ax.plot(all_tr_times, sub_neural[check_key][0], label="%s" % item, c=match_color_s[item])
# make labels:
ax.set_title("Task time course for %s-run0%02d" % (subid, run), weight='bold')
ax.set_xlabel("Time course (second)")
ax.set_ylabel("Task (Off = 0, On = 1)")
ax.set_yticks([0, 1])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., numpoints=1)
# save figure
fig.savefig(figure_path + "Task_time_course_%s_run0%02d" % (subid, run))
# clear figure
fig.clf()
# close pyplot window
plt.close()
# report
print ("task time course images are saved!")
print (separator)
# assume true HRF starts at zero, and gets to zero sometime before 35 seconds.
tr_times = np.arange(0, 30, sub.TR)
hrf_at_trs = convol.hrf(tr_times)
# get convolution data for each objects in this run -> show figure for run001
print ("Work on convolution based on condition files:")
print (separator)
sub_convolved = convol.get_all_convolved(sub_neural, hrf_at_trs, file_path)
print ("convolution analysis for all runs is complete")
print (separator)
# save convolved data
for key, data in sub_convolved.items():
np.savetxt(file_path + "convolved_%s.txt" % key, data)
print ("convolved results are saved as txt files")
# show relationship between task time course and bold signals
print ("Show relationship between task time course and predicted BOLD signals")
# get keys for each neural conditions
sub_neural_key = sub_neural.keys()
# sort key for better display
sub_neural_key = sorted(sub_neural_key)
# create figures
fig = plt.figure()
for run in range(1, run_num):
ax = plt.subplot(111)
ax2 = ax.twinx()
figures = {}
count = 0
for item in object_list:
# focus on one at a time:
check_key = "run0%02d-%s" % (run, item)
# perform convolution to generate estimated BOLD signals
convolved = convol.convolution(sub_neural[check_key][0], hrf_at_trs)
# plot the task time course and the estimated BOLD signals in same plot
# plot the estimated BOLD signals
# plot the task time course
figures["fig" + "%s" % str(count)] = ax.plot(all_tr_times, sub_neural[check_key][0], c=match_color_s[item], label="%s-task" % item)
count += 1
# plot estimated BOLD signal
figures["fig" + "%s" % str(count)] = ax2.plot(all_tr_times, convolved, c=match_color_c[item], label="%s-BOLD" % item)
count += 1
# label this plot
plt.subplots_adjust(left=0.1, right=0.6, bottom=0.1, top=0.85)
plt.text(0.25, 1.05, "Hemodynamic prediction of %s-run0%02d" % (subid, run), weight='bold')
ax.set_xlabel("Time course (second)")
ax.set_ylabel("Task (Off = 0, On = 1)")
ax.set_yticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0])
ax2.set_ylabel("Estimated BOLD signal")
ax2.set_yticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0])
# label legend
total_figures = figures["fig0"]
for i in range(1, len(figures)):
total_figures += figures["fig" + "%s" % str(i)]
labs = [fig.get_label() for fig in total_figures]
ax.legend(total_figures, labs, bbox_to_anchor=(1.2, 1.0), loc=0, borderaxespad=0., fontsize=11)
# save plot
plt.savefig(figure_path + "%s_run0%02d_bold_prediction.png" % (subid, run))
# clear plot
plt.clf()
# close pyplot window
plt.close()
print (sec_separator)
# remove outlier from convolved results
print("Progress: clean up convolved results")
sub_convolved = convol.remove_outlier(sub.sub_id, sub_convolved, outlier_index)
print ("Outliers are removed from convolved results")
print (sec_separator)
# smooth the images:
print ("Progress: Smooth images")
# subject clean and smooth img == sub_cs_img
sub_cs_img = sm.smooth(sub_clean_img)
print ("Smooth images: Complete!")
print (sec_separator)
# get shape info of the images
print ("Progress: record shape information")
shape = {}
for key, img in sub_cs_img.items():
shape[key] = img.shape
print ("shape of %s = %s" % (key, shape[key]))
with open(file_path+'new_shape.json', 'a') as fp:
json.dump(shape, fp)
print ("New shape info of images is recorded and saved as file")
print (sec_separator)
############################## Linear regression ##############################
print ("Let's run Linear regression")
print (separator)
# generate design matrix
print ("Progress: generate design matrix")
# generate design matrix for each runs
design_matrix = lm.batch_make_design(sub_cs_img, sub_convolved)
# check parameter numbers
parameters = design_matrix["%s_run001" % subid].shape[-1]
print ("parameter number: %d" % parameters)
print ("Design matrix generated")
print (separator)
# rescale design matrix
print ("Progress: rescale design matrix")
design_matrix = lm.batch_scale_matrix(design_matrix)
print ("Rescale design matrix: complete!")
# save scaled design matrix as figure
# plot scaled design matrix
fig = plt.figure(figsize=(8.0, 8.0))
for key, matrix in design_matrix.items():
ax = plt.subplot(111)
ax.imshow(matrix, aspect=0.1, interpolation="nearest", cmap="gray")
# label this plot
fig.text(0.15, 0.95, "scaled design matrix for %s" % key, weight='bold', fontsize=18)
ax.set_xlabel("Parameters", fontsize=16)
ax.set_xticklabels([])
ax.set_ylabel("Scan time course", fontsize=16)
# save plot
plt.savefig(figure_path + "design_matrix_%s" % key)
# clean plot
plt.clf()
# close pyplot window
plt.close()
print ("Design matrices are saved as figures")
print (separator)
# use maskfunc to generate mask
print ("Progress: generate mask for brain images")
mask, mean_data = msk.generateMaskedBrain(sub_clean_img)
print ("Generate mask for brain images: complete!")
# save mask as figure
for key, each in mask.items():
for i in range(1, 90):
plt.subplot(9, 10, i)
plt.imshow(each[:, :, i], interpolation="nearest", cmap="gray", alpha=0.5)
# label plot
ax = plt.gca()
ax.set_xticklabels([])
ax.set_yticklabels([])
# save plot
plt.savefig(figure_path + "all_masks_for_%s.png" % key)
# clear plot
plt.clf()
# close pyplot window
plt.close()
print (separator)
# run linear regression to generate betas
# first step: use mask to get data and reshape to 2D
print ("Progress: Use mask to subset brain images")
sub_cs_mask_img = lm.apply_mask(sub_cs_img, mask)
sub_cs_mask_img_2d = lm.batch_convert_2d_based(sub_cs_mask_img, shape)
# sub1_cs_mask_img_2d = lm.batch_convert_2d(sub1_cs_mask_img)
print ("Use mask to subset brain images: complete!")
print (separator)
# second step: run linear regression to get betas:
print ("Progress: Run linear regression to get beta hat")
all_betas = {}
for key, img in sub_cs_mask_img_2d.items():
#img_2d = np.reshape(img, (-1, img.shape[-1]))
Y = img.T
all_betas[key] = npl.pinv(design_matrix[key]).dot(Y)
print ("Getting betas from linear regression: complete!")
print (separator)
# third step: put betas back into it's original place:
print ("Save beta figures:")
beta_vols = {}
raw_beta_vols = {}
for key, betas in all_betas.items():
# create 3D zeros to hold betas
beta_vols[key] = np.zeros(shape[key][:-1] + (parameters,))
# get the mask info
check_mask = (mask[key] == 1)
# fit betas back to 3D
beta_vols[key][check_mask] = betas.T
print ("betas of %s is fitted back to 3D!" % key)
# save 3D betas in dictionary
raw_beta_vols[key] = beta_vols[key]
# get min and max of figure
vmin = beta_vols[key][:, :, 21:70].min()
vmax = beta_vols[key][:, :, 21:70].max()
# clear the background
beta_vols[key][~check_mask] = np.nan
mean_data[key][~check_mask] = np.nan
# plot betas
fig = plt.figure(figsize=(8.0, 8.0))
for item in object_list:
# plot 50 pictures
fig, axes = plt.subplots(nrows=5, ncols=10)
lookat = 20
for ax in axes.flat:
# show plot from z= 21~70
ax.imshow(mean_data[key][:, :, lookat], interpolation="nearest", cmap="gray", alpha=0.5)
im = ax.imshow(beta_vols[key][:, :, lookat, match_para[item]], cmap=nice_cmap, alpha=0.5)
# label the plot
ax.set_xticks([])
ax.set_yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
lookat += 1
# label the plot
fig.subplots_adjust(bottom=0.2, hspace=0)
fig.text(0.28, 0.9, "Brain area responding to %s in %s" % (item, subid), weight='bold')
# color bar
cbar_ax = fig.add_axes([0.15, 0.08, 0.7, 0.04])
fig.colorbar(im, cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.15, "Relative responding intensity")
fig.text(0.095, 0.09, "Low")
fig.text(0.87, 0.09, "High")
# save plot
plt.savefig(figure_path + "betas_for_%s_%s.png" % (key, item))
# clear plot
plt.clf()
# close pyplot window
plt.close()
# report
print ("beta figures are generated!!")
print (sec_separator)
# analyze based on odd runs even runs using affine
print ("Progress: Use affine matrix to check brain position")
print ("print affine matrix for each images:")
affine_matrix = {}
for key, img in sub_img.items():
affine_matrix[key] = img.affine
print("%s :\n %s" % (key, img.affine))
# check if they all have same affine matrix
same_affine = True
check_matrix = affine_matrix["%s_run001" % subid]
for key, aff in affine_matrix.items():
if aff.all() != check_matrix.all():
same_affine = False
if same_affine:
print ("They have the same affine matrix!")
else:
print ("They don't have same affine matrix -> be careful about the brain position")
print (sec_separator)
############################## 2D correlation #################################
# Focus on 2D slice to run the analysis:
print ("Progress: Try 2D correlation")
print ("Focus on one slice: k = %d" % slice_number)
print (separator)
print ("Run correlation between run1_house, run2_house and run2_face")
# get 2D slice for run1 house
run1_house = raw_beta_vols["%s_run001" % subid][:, 25:50, slice_number, 5]
# save as plot
plt.imshow(run1_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run1_House" % subid)
plt.savefig(figure_path + "%s_run1_house.png" % subid)
plt.clf()
# get 2D slice of run2 house
run2_house = raw_beta_vols["%s_run002" % subid][:, 25:50, slice_number, 5]
# save as plot
plt.imshow(run2_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run2_House" % subid)
plt.savefig(figure_path + "%s_run2_house.png" % subid)
plt.clf()
# get 2D slice for run2 face
run2_face = raw_beta_vols["%s_run002" % subid][:, 25:50, slice_number, 4]
# save as plot
plt.imshow(run2_face, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run2_Face" % subid)
plt.savefig(figure_path + "%s_run2_face.png" % subid)
plt.close()
# put those 2D plots together
fig = plt.figure()
plt.subplot(1, 3, 1, xticks=[], yticks=[], xticklabels=[], yticklabels=[])
plt.imshow(run1_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run1_House" % subid[-1], weight='bold', fontsize=10)
plt.subplot(1, 3, 2, xticks=[], yticks=[])
plt.imshow(run2_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run2_House" % subid[-1], weight='bold', fontsize=10)
plt.subplot(1, 3, 3, xticks=[], yticks=[])
plt.imshow(run2_face, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run2_Face" % subid[-1], weight='bold', fontsize=10)
# label plot
fig.subplots_adjust(bottom=0.2, hspace=0)
cbar_ax = fig.add_axes([0.15, 0.08, 0.7, 0.04])
plt.colorbar(cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.15, "Relative responding intensity")
fig.text(0.095, 0.09, "Low")
fig.text(0.87, 0.09, "High")
# save plot
plt.savefig(figure_path + "%s_run_figure_compile.png" % subid)
# close pyplot window
plt.close()
print ("plots for analysis are saved as figures")
print ("Progress: Run correlation coefficient")
# create a deepcopy of raw_beta_vols for correlation analysis:
raw_beta_vols_corr = copy.deepcopy(raw_beta_vols)
# flatten the 2D matrix
house1 = np.ravel(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, match_para["house"]])
house2 = np.ravel(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, match_para["house"]])
face2 = np.ravel(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, match_para["face"]])
# save flatten results for further analysis
np.savetxt(file_path + "%s_house1.txt" % subid, house1)
np.savetxt(file_path + "%s_house2.txt" % subid, house2)
np.savetxt(file_path + "%s_face2.txt" % subid, face2)
# change nan to 0 in the array
house1[np.isnan(house1)] = 0
house2[np.isnan(house2)] = 0
face2[np.isnan(face2)] = 0
# correlation coefficient study:
house1_house2 = np.corrcoef(house1, house2)
house1_face2 = np.corrcoef(house1, face2)
print ("%s run1 house vs run2 house: %s" % (subid, house1_house2))
print ("%s run1 house vs run2 face : %s" % (subid, house1_face2))
print (sec_separator)
# save individual 2D slice as txt for further analysis
print ("save 2D result for each object and each run individually as txt file")
for i in range(1, run_num+1):
for item in object_list:
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
np.savetxt(file_path + "%s_run0%02d_%s.txt" % (subid, i, item), np.ravel(temp))
print ("Complete!!")
print (sec_separator)
# analyze based on odd runs even runs
print ("Progress: prepare data to run correlation based on odd runs and even runs:")
print ("Take average of odd run / even run results to deal with impacts of variations between runs")
even_run = {}
odd_run = {}
even_count = 0
odd_count = 0
# add up even run results / odd run results and take mean for each groups
for item in object_list:
even_run[item] = np.zeros_like(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, 5])
odd_run[item] = np.zeros_like(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, 5])
print ("make average of odd run results:")
# add up odd run results
for i in range(1, run_num+1, 2):
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
temp[np.isnan(temp)] = 0
odd_run[item] += temp
odd_count += 1
print("odd runs: %d-%s" % (i, item))
print ("make average od even run results:")
# take mean
odd_run[item] = odd_run[item]/odd_count
# add up even run results
for i in range(2, run_num+1, 2):
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
temp[np.isnan(temp)] = 0
even_run[item] += temp
even_count += 1
print("even: %d, %s" % (i, item))
# take mean
even_run[item] = even_run[item]/even_count
print (separator)
# save odd run and even run results as txt file
print ("Progress: save flatten mean odd / even run results as txt files")
for key, fig in even_run.items():
np.savetxt(file_path + "%s_even_%s.txt" % (subid, key), np.ravel(fig))
for key, fig in odd_run.items():
np.savetxt(file_path + "%s_odd_%s.txt" % (subid, key), np.ravel(fig))
print ("odd run and even run results are saved as txt files!!!!!")
print (separator)
############################ 3D correlation ###################################
# check 3D:
print ("Focus on one 3D analysis, shape = [:, 25:50, 31:36]")
# put 3D slice of run1 house, run2 face, run2 house together
fig = plt.figure()
i = 1
run1_house = raw_beta_vols["%s_run001" % subid][:, 25:50, 37:42, match_para["house"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run1_house[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run1_house" % subid)
run2_house = raw_beta_vols["%s_run002" % subid][:, 25:50, 37:42, match_para["house"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run2_house[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run2_house" % subid)
run2_face = raw_beta_vols["%s_run002" % subid][:, 25:50, 37:42, match_para["face"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run2_face[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run2_face" % subid)
# label plot
fig.subplots_adjust(bottom=0.2, hspace=0.5)
cbar_ax = fig.add_axes([0.15, 0.06, 0.7, 0.02])
plt.colorbar(cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.1, "Relative responding intensity")
fig.text(0.095, 0.07, "Low")
fig.text(0.87, 0.07, "High")
plt.savefig(figure_path + "Try_3D_correlation_%s.png" % subid)
plt.close()
# try to run 3D correlation study:
print ("Progress: Run correlation coefficient with 3D data")
# make a deepcopy of the raw_beta_vols for correlation study:
raw_beta_vols_3d_corr = copy.deepcopy(raw_beta_vols)
# get flatten 3D slice:
house1_3d = np.ravel(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 37:42, match_para["house"]])
house2_3d = np.ravel(raw_beta_vols_3d_corr["%s_run002" % subid][:, 25:50, 37:42, match_para["house"]])
face2_3d = np.ravel(raw_beta_vols_3d_corr["%s_run002" % subid][:, 25:50, 37:42, match_para["face"]])
# change nan to 0 in the array
house1_3d[np.isnan(house1_3d)] = 0
house2_3d[np.isnan(house2_3d)] = 0
face2_3d[np.isnan(face2_3d)] = 0
# correlation coefficient study:
threeD_house1_house2 = np.corrcoef(house1_3d, house2_3d)
threeD_house1_face2 = np.corrcoef(house1_3d, face2_3d)
print ("%s run1 house vs run2 house in 3D: %s" % (subid, threeD_house1_house2))
print ("%s run1 house vs run2 face in 3D: %s" % (subid, threeD_house1_face2))
print (separator)
# prepare data to analyze 3D brain based on odd runs even runs
print ("Prepare data to analyze \"3D\" brain based on odd runs and even runs:")
print ("Take average of \"3D\" odd runs / even runs to deal with impacts of variations between runs")
even_run_3d = {}
odd_run_3d = {}
# add up even run results / odd run results and take mean for each groups
for item in object_list:
even_run_3d[item] = np.zeros_like(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 37:42, match_para[item]])
odd_run_3d[item] = np.zeros_like(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 37:42, match_para[item]])
print ("make average of \"3D\" odd run results:")
# add up odd runs results
for i in range(1, run_num+1, 2):
temp = raw_beta_vols_3d_corr["%s_run0%02d" % (subid, i)][:, 25:50, 37:42, match_para[item]]
temp[np.isnan(temp)] = 0
odd_run_3d[item] += temp
print("odd runs 3D: %d-%s" % (i, item))
# take mean
odd_run_3d[item] = odd_run_3d[item]/odd_count
print ("make average of \"3D\" even run results:")
# add up even runs results
for i in range(2, run_num+1, 2):
temp = raw_beta_vols_3d_corr["%s_run0%02d" % (subid, i)][:, 25:50, 37:42, match_para[item]]
temp[np.isnan(temp)] = 0
even_run_3d[item] += temp
print("even runs 3D: %d-%s" % (i, item))
# take mean
even_run_3d[item] = even_run_3d[item]/even_count
# save odd run and even run results as txt file
for key, fig in even_run_3d.items():
np.savetxt(file_path + "%s_even_%s_3d.txt" % (subid, key), np.ravel(fig))
for key, fig in odd_run_3d.items():
np.savetxt(file_path + "%s_odd_%s_3d.txt" % (subid, key), np.ravel(fig))
print ("\"3D\" odd run and even run results are saved as txt files!!!!!")
print (separator)
print ("Analysis and Data Pre-processing for %s : Complete!!!" % subid) | bsd-3-clause |
stwunsch/gnuradio | gr-utils/python/utils/plot_data.py | 59 | 5818 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Utility to help plotting data from files.
"""
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
    """Interactive matplotlib viewer for raw sample files.

    Reads fixed-size blocks of binary samples from one or more files and
    plots them as time series on a shared axis.  Navigation is via "<"/">"
    buttons or the keyboard (space/arrow keys).  Relies on ``from pylab
    import *`` at module level for figure/widget names (figure, figtext,
    Button, connect, show, draw, ...).
    """
    def __init__(self, datatype, filenames, options):
        """Open the input files, draw the initial plot and start the GUI.

        datatype  -- scipy/numpy dtype factory describing one sample.
        filenames -- list of file paths; one curve (and legend entry) each.
        options   -- parsed command-line options providing ``block``,
                     ``start`` and ``sample_rate`` attributes.
        """
        self.hfile = list()
        self.legend_text = list()
        for f in filenames:
            self.hfile.append(open(f, "r"))
            self.legend_text.append(f)

        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate

        self.datatype = datatype
        self.sizeof_data = datatype().nbytes # number of bytes per sample in file

        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22

        # Setup PLOT
        self.fig = figure(1, figsize=(16, 9), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size

        # Status texts across the top of the figure; text_file_pos is
        # updated on every block read (see get_data).
        self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()

        # "<" / ">" buttons step backward/forward one block.
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)

        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)

        self.xlim = self.sp_f.get_xlim()

        self.manager = get_current_fig_manager()
        connect('key_press_event', self.click)
        show()

    def get_data(self, hfile):
        """Read the next block of samples from *hfile* into self.f/self.time."""
        # tell()//sizeof_data converts the byte offset into a sample index.
        self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
        try:
            f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
        except MemoryError:
            print "End of File"
        else:
            self.f = scipy.array(f)
            # NOTE(review): 1/self.sample_rate truncates to 0 under Python 2
            # if sample_rate is an int -- presumably options always supply a
            # float; confirm against the option parser.
            self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])

    def make_plots(self):
        """Create the amplitude subplot and draw the first block of each file."""
        self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
        self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        self.plot_f = list()
        maxval = -1e12
        minval = 1e12
        for hf in self.hfile:
            # if specified on the command-line, set file pointer
            hf.seek(self.sizeof_data*self.start, 1)
            self.get_data(hf)

            # Subplot for real and imaginary parts of signal
            self.plot_f += plot(self.time, self.f, 'o-')
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())

        # 50% head-room above/below the observed extrema.
        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
        draw()

    def update_plots(self):
        """Refresh every curve with the current block and rescale the y-axis."""
        maxval = -1e12
        minval = 1e12
        for hf,p in zip(self.hfile,self.plot_f):
            self.get_data(hf)
            p.set_data([self.time, self.f])
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())

        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        draw()

    def click(self, event):
        """Keyboard handler: space/down/right step forward, up/left step back."""
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]

        if(find(event.key, forward_valid_keys)):
            self.step_forward()

        elif(find(event.key, backward_valid_keys)):
            self.step_backward()

    def button_left_click(self, event):
        # "<" button: same action as the backward keys.
        self.step_backward()

    def button_right_click(self, event):
        # ">" button: same action as the forward keys.
        self.step_forward()

    def step_forward(self):
        # Reading the next block advances the file pointers implicitly.
        self.update_plots()

    def step_backward(self):
        """Rewind every file by two blocks (or to the start) and redraw."""
        for hf in self.hfile:
            # Step back in file position
            # Two blocks back because update_plots will read one forward.
            if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
                hf.seek(-2*self.sizeof_data*self.block_length, 1)
            else:
                # Fewer than two blocks in: rewind to the beginning.
                hf.seek(-hf.tell(),1)
        self.update_plots()
def find(item_in, list_search):
    """Return True if *item_in* occurs in *list_search*, else False.

    Helper used by plot_data.click to test whether a pressed key is one of
    the navigation keys.
    """
    # The original try/`index() != None`/except dance is equivalent to a
    # plain membership test: index() returns an int (never None) when the
    # item is found and raises ValueError otherwise.
    return item_in in list_search
| gpl-3.0 |
dustinbcox/biodatalogger | biodata_grapher.py | 1 | 1356 | #!/usr/bin/python2.7
"""
Biodata_grapher
2015-08-30
"""
import glob
import csv
import os
import matplotlib.pyplot as plt
import matplotlib
from datetime import datetime
import traceback
for filename in glob.glob('*_biodatalogger_readings.csv'):
filename_png = filename.replace('.csv', '.png')
if os.path.exists(filename_png):
print "Skipping:", filename, "since png file already exists"
continue
y_data = []
x_data = []
with open(filename, 'r') as csvfile:
tempreader = csv.reader(csvfile, delimiter=',', quotechar='"')
try:
for row in tempreader:
dateinfo, celsius, fahrenheit = row
dateinfo = datetime.strptime(dateinfo, "%Y-%m-%d %H:%M:%S.%f")
day = dateinfo.strftime("%Y-%m-%d")
x_data.append(dateinfo)
#y_data.append(fahrenheit)
y_data.append(celsius)
except:
print "file:", filename
print "row:", row
traceback.print_exc()
#plt.plot(x_data, matplotlib.dates.date2num(y_data))
matplotlib.pyplot.plot_date(x_data,y_data)
#plt.ylabel('Temp (F)')
plt.ylabel('Temp (C)')
plt.title('Temperature for ' + day)
plt.xlabel('Time')
plt.gcf().autofmt_xdate()
plt.savefig(filename_png)
plt.show()
| gpl-2.0 |
ssaeger/scikit-learn | sklearn/cross_validation.py | 7 | 67336 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
# Emit a DeprecationWarning as soon as this module is imported: everything
# here was superseded by sklearn.model_selection and is scheduled for
# removal in 0.20.
warnings.warn("This module has been deprecated in favor of the "
              "model_selection module into which all the refactored classes "
              "and functions are moved. Also note that the interface of the "
              "new CV iterators are different from that of this module. "
              "This module will be removed in 0.20.", DeprecationWarning)


# Public API of this deprecated module.  Some names (PredefinedSplit,
# check_cv, train_test_split, ...) are defined further down the file,
# outside this excerpt.
__all__ = ['KFold',
           'LabelKFold',
           'LeaveOneLabelOut',
           'LeaveOneOut',
           'LeavePLabelOut',
           'LeavePOut',
           'ShuffleSplit',
           'StratifiedKFold',
           'StratifiedShuffleSplit',
           'PredefinedSplit',
           'LabelShuffleSplit',
           'check_cv',
           'cross_val_score',
           'cross_val_predict',
           'permutation_test_score',
           'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
    """Base class for CV iterators where train_mask = ~test_mask

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    """

    def __init__(self, n):
        # Reject non-integral n (e.g. 10.5) while accepting integral floats.
        if abs(n - int(n)) >= np.finfo('f').eps:
            raise ValueError("n must be an integer")
        self.n = int(n)

    def __iter__(self):
        # Yield (train_indices, test_indices) pairs; the train set is the
        # complement of each boolean test mask.
        ind = np.arange(self.n)
        for test_index in self._iter_test_masks():
            train_index = np.logical_not(test_index)
            train_index = ind[train_index]
            test_index = ind[test_index]
            yield train_index, test_index

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets.

        By default, delegates to _iter_test_indices()
        """
        for test_index in self._iter_test_indices():
            test_mask = self._empty_mask()
            test_mask[test_index] = True
            yield test_mask

    def _iter_test_indices(self):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError

    def _empty_mask(self):
        # All-False boolean mask of length n.
        return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
    """Leave-One-Out cross validation iterator.

    Provides train/test indices to split data in train test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.

    Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
    ``LeavePOut(n, p=1)``.

    Due to the high number of test sets (which is the same as the
    number of samples) this cross validation method can be very costly.
    For large datasets one should favor KFold, StratifiedKFold or
    ShuffleSplit.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4]])
    >>> y = np.array([1, 2])
    >>> loo = cross_validation.LeaveOneOut(2)
    >>> len(loo)
    2
    >>> print(loo)
    sklearn.cross_validation.LeaveOneOut(n=2)
    >>> for train_index, test_index in loo:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [1] TEST: [0]
    [[3 4]] [[1 2]] [2] [1]
    TRAIN: [0] TEST: [1]
    [[1 2]] [[3 4]] [1] [2]

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def _iter_test_indices(self):
        # Each sample index is, in turn, its own singleton test set.
        return range(self.n)

    def __repr__(self):
        return '%s.%s(n=%i)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
        )

    def __len__(self):
        # One split per sample.
        return self.n
class LeavePOut(_PartitionIterator):
    """Leave-P-Out cross validation iterator

    Provides train/test indices to split data in train test sets. This results
    in testing on all distinct samples of size p, while the remaining n - p
    samples form the training set in each iteration.

    Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
    which creates non-overlapping test sets.

    Due to the high number of iterations which grows combinatorically with the
    number of samples this cross validation method can be very costly. For
    large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.

    p : int
        Size of the test sets.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> lpo = cross_validation.LeavePOut(4, 2)
    >>> len(lpo)
    6
    >>> print(lpo)
    sklearn.cross_validation.LeavePOut(n=4, p=2)
    >>> for train_index, test_index in lpo:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [1 2] TEST: [0 3]
    TRAIN: [0 3] TEST: [1 2]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 1] TEST: [2 3]
    """

    def __init__(self, n, p):
        super(LeavePOut, self).__init__(n)
        self.p = p

    def _iter_test_indices(self):
        # Every size-p combination of sample indices is a test set, in
        # lexicographic order (itertools.combinations order).
        for comb in combinations(range(self.n), self.p):
            yield np.array(comb)

    def __repr__(self):
        return '%s.%s(n=%i, p=%i)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.p,
        )

    def __len__(self):
        # Number of splits = n choose p (binomial coefficient).
        return int(factorial(self.n) / factorial(self.n - self.p)
                   / factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
    """Base class to validate KFold approaches"""

    @abstractmethod
    def __init__(self, n, n_folds, shuffle, random_state):
        """Validate and store common K-fold parameters.

        Raises ValueError for non-integral or out-of-range n_folds and
        TypeError when shuffle is not a bool.
        """
        super(_BaseKFold, self).__init__(n)

        # n_folds must be an integral value (integral floats accepted).
        if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
            raise ValueError("n_folds must be an integer")
        self.n_folds = n_folds = int(n_folds)

        if n_folds <= 1:
            raise ValueError(
                "k-fold cross validation requires at least one"
                " train / test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        if n_folds > self.n:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(n_folds, n))

        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.shuffle = shuffle
        self.random_state = random_state
class KFold(_BaseKFold):
    """K-Folds cross validation iterator.

    Provides train/test indices to split data in train test sets. Split
    dataset into k consecutive folds (without shuffling by default).

    Each fold is then used a validation set once while the k - 1 remaining
    fold form the training set.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.

    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.cross_validation import KFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> kf = KFold(4, n_folds=2)
    >>> len(kf)
    2
    >>> print(kf)  # doctest: +NORMALIZE_WHITESPACE
    sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
                                   random_state=None)
    >>> for train_index, test_index in kf:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [2 3] TEST: [0 1]
    TRAIN: [0 1] TEST: [2 3]

    Notes
    -----
    The first n % n_folds folds have size n // n_folds + 1, other folds have
    size n // n_folds.

    See also
    --------
    StratifiedKFold take label information into account to avoid building
    folds with imbalanced class distributions (for binary or multiclass
    classification tasks).

    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n, n_folds=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n, n_folds, shuffle, random_state)
        # Sample order used by _iter_test_indices; shuffled once up front
        # so all folds come from the same permutation.
        self.idxs = np.arange(n)
        if shuffle:
            rng = check_random_state(self.random_state)
            rng.shuffle(self.idxs)

    def _iter_test_indices(self):
        n = self.n
        n_folds = self.n_folds
        # Base fold size n // n_folds; the first n % n_folds folds absorb
        # one extra sample each so the fold sizes sum to n.
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
        fold_sizes[:n % n_folds] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            yield self.idxs[start:stop]
            current = stop

    def __repr__(self):
        return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LabelKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping labels.

    The same label will not appear in two different folds (the number of
    distinct labels has to be at least equal to the number of folds).

    The folds are approximately balanced in the sense that the number of
    distinct labels is approximately the same in each fold.

    .. versionadded:: 0.17

    Parameters
    ----------
    labels : array-like with shape (n_samples, )
        Contains a label for each sample.
        The folds are built so that the same label does not appear in two
        different folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    Examples
    --------
    >>> from sklearn.cross_validation import LabelKFold
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> labels = np.array([0, 0, 2, 2])
    >>> label_kfold = LabelKFold(labels, n_folds=2)
    >>> len(label_kfold)
    2
    >>> print(label_kfold)
    sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
    >>> for train_index, test_index in label_kfold:
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    ...     print(X_train, X_test, y_train, y_test)
    ...
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [3 4]
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [3 4] [1 2]

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """
    def __init__(self, labels, n_folds=3):
        super(LabelKFold, self).__init__(len(labels), n_folds,
                                         shuffle=False, random_state=None)

        unique_labels, labels = np.unique(labels, return_inverse=True)
        n_labels = len(unique_labels)

        if n_folds > n_labels:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of labels: {1}.").format(n_folds,
                                                            n_labels))

        # Weight labels by their number of occurrences
        n_samples_per_label = np.bincount(labels)

        # Distribute the most frequent labels first
        indices = np.argsort(n_samples_per_label)[::-1]
        n_samples_per_label = n_samples_per_label[indices]

        # Total weight of each fold
        n_samples_per_fold = np.zeros(n_folds)

        # Mapping from label index to fold index
        label_to_fold = np.zeros(len(unique_labels))

        # Distribute samples by adding the largest weight to the lightest fold
        # (greedy bin-packing, which keeps fold sizes approximately equal).
        for label_index, weight in enumerate(n_samples_per_label):
            lightest_fold = np.argmin(n_samples_per_fold)
            n_samples_per_fold[lightest_fold] += weight
            label_to_fold[indices[label_index]] = lightest_fold

        # Per-sample fold assignment, consumed by _iter_test_indices.
        self.idxs = label_to_fold[labels]

    def _iter_test_indices(self):
        for f in range(self.n_folds):
            yield np.where(self.idxs == f)[0]

    def __repr__(self):
        return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
        )

    def __len__(self):
        return self.n_folds
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a variation of KFold that
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    y : array-like, [n_samples]
        Samples to split in K folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(y, n_folds=2)
    >>> len(skf)
    2
    >>> print(skf)  # doctest: +NORMALIZE_WHITESPACE
    sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
                                             shuffle=False, random_state=None)
    >>> for train_index, test_index in skf:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 3] TEST: [0 2]
    TRAIN: [0 2] TEST: [1 3]

    Notes
    -----
    All the folds have size trunc(n_samples / n_folds), the last one has the
    complementary.

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, y, n_folds=3, shuffle=False,
                 random_state=None):
        super(StratifiedKFold, self).__init__(
            len(y), n_folds, shuffle, random_state)
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_labels, y_inversed = np.unique(y, return_inverse=True)
        label_counts = bincount(y_inversed)
        min_labels = np.min(label_counts)
        if np.all(self.n_folds > label_counts):
            raise ValueError("All the n_labels for individual classes"
                             " are less than %d folds."
                             % (self.n_folds))
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)

        # don't want to use the same seed in each label's shuffle
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each label so as to respect the
        # balance of labels
        per_label_cvs = [
            KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
                  random_state=rng) for c in label_counts]
        test_folds = np.zeros(n_samples, dtype=np.int)
        # Walk the per-label KFold iterators in lockstep, stamping the fold
        # index onto the samples of each label in turn.
        for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
            for label, (_, test_split) in zip(unique_labels, per_label_splits):
                label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) to make it possible to not crash even
                # if the data is not 100% stratifiable for all the labels
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(label_test_folds)]
                label_test_folds[test_split] = test_fold_idx
                test_folds[y == label] = label_test_folds

        self.test_folds = test_folds
        self.y = y

    def _iter_test_masks(self):
        for i in range(self.n_folds):
            yield self.test_folds == i

    def __repr__(self):
        return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.y,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label_Out cross-validation iterator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> labels = np.array([1, 1, 2, 2])
    >>> lol = cross_validation.LeaveOneLabelOut(labels)
    >>> len(lol)
    2
    >>> print(lol)
    sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
    >>> for train_index, test_index in lol:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [1 2] [1 2]
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [1 2]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, labels):
        super(LeaveOneLabelOut, self).__init__(len(labels))
        # We make a copy of labels to avoid side-effects during iteration
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        # One split per distinct label value: that label is the test set.
        for i in self.unique_labels:
            yield self.labels == i

    def __repr__(self):
        return '%s.%s(labels=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.labels,
        )

    def __len__(self):
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-Label_Out cross-validation iterator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels while the latter uses samples
    all assigned the same labels.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    p : int
        Number of samples to leave out in the test split.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> labels = np.array([1, 2, 3])
    >>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
    >>> len(lpl)
    3
    >>> print(lpl)
    sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
    >>> for train_index, test_index in lpl:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2] TEST: [0 1]
    [[5 6]] [[1 2]
     [3 4]] [1] [1 2]
    TRAIN: [1] TEST: [0 2]
    [[3 4]] [[1 2]
     [5 6]] [2] [1 1]
    TRAIN: [0] TEST: [1 2]
    [[1 2]] [[3 4]
     [5 6]] [1] [2 1]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, labels, p):
        # We make a copy of labels to avoid side-effects during iteration
        super(LeavePLabelOut, self).__init__(len(labels))
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)
        self.p = p

    def _iter_test_masks(self):
        # Every size-p combination of distinct labels forms one test set;
        # the mask marks all samples carrying any of those labels.
        comb = combinations(range(self.n_unique_labels), self.p)
        for idx in comb:
            test_index = self._empty_mask()
            idx = np.array(idx)
            for l in self.unique_labels[idx]:
                test_index[self.labels == l] = True
            yield test_index

    def __repr__(self):
        return '%s.%s(labels=%s, p=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.labels,
            self.p,
        )

    def __len__(self):
        # Number of splits = n_unique_labels choose p.
        return int(factorial(self.n_unique_labels) /
                   factorial(self.n_unique_labels - self.p) /
                   factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        # Resolve fractions/counts into concrete integer sizes up front so
        # invalid combinations fail at construction time.
        self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
                                                            train_size)

    def __iter__(self):
        for train, test in self._iter_indices():
            yield train, test
        return

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validation iterator.

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
    ...     test_size=.25, random_state=0)
    >>> len(rs)
    3
    >>> print(rs)
    ... # doctest: +ELLIPSIS
    ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
    >>> for train_index, test_index in rs:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...
    TRAIN: [3 1 0] TEST: [2]
    TRAIN: [2 1 3] TEST: [0]
    TRAIN: [0 2 1] TEST: [3]
    >>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
    ...     train_size=0.5, test_size=.25, random_state=0)
    >>> for train_index, test_index in rs:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...
    TRAIN: [3 1] TEST: [2]
    TRAIN: [2 1] TEST: [0]
    TRAIN: [0 2] TEST: [3]
    """

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        for i in range(self.n_iter):
            # random partition
            # First n_test indices of a fresh permutation are the test set,
            # the next n_train are the train set (disjoint by construction).
            permutation = rng.permutation(self.n)
            ind_test = permutation[:self.n_test]
            ind_train = permutation[self.n_test:self.n_test + self.n_train]
            yield ind_train, ind_test

    def __repr__(self):
        # NOTE(review): train_size is not included in the repr upstream;
        # kept as-is for backward-compatible output.
        return ('%s(%d, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.n,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross validation iterator
    Provides train/test indices to split data in train test sets.
    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.
    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    y : array, [n_samples]
        Labels of samples.
    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.
    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
    >>> len(sss)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
    >>> for train_index, test_index in sss:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """
    def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        # n_train / n_test bookkeeping is performed by the base class; here we
        # only record the label array and validate that stratification is
        # feasible at all.
        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, random_state)
        self.y = np.array(y)
        self.classes, self.y_indices = np.unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]
        # Every class must occur at least twice so that it can be represented
        # in both the train and the test side of a split.
        if np.min(bincount(self.y_indices)) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")
        if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_train, n_cls))
        if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_test, n_cls))
    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        cls_count = bincount(self.y_indices)
        # Per-class sample fraction; per-class train/test allocations (n_i,
        # t_i) are made proportional to it, with t_i capped so that
        # n_i + t_i never exceeds the class population.
        p_i = cls_count / float(self.n)
        n_i = np.round(self.n_train * p_i).astype(int)
        t_i = np.minimum(cls_count - n_i,
                         np.round(self.n_test * p_i).astype(int))
        for n in range(self.n_iter):
            train = []
            test = []
            for i, cls in enumerate(self.classes):
                permutation = rng.permutation(cls_count[i])
                cls_i = np.where((self.y == cls))[0][permutation]
                train.extend(cls_i[:n_i[i]])
                test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by affecting randomly the missing indexes
                missing_idx = np.where(bincount(train + test,
                                                minlength=len(self.y)) == 0,
                                       )[0]
                missing_idx = rng.permutation(missing_idx)
                train.extend(missing_idx[:(self.n_train - len(train))])
                # NOTE(review): when len(test) == self.n_test here (only
                # train was short), the slice below is [-0:], i.e. ALL of
                # missing_idx is appended to test -- looks like a latent
                # bug inherited from upstream; confirm before relying on
                # exact split sizes.
                test.extend(missing_idx[-(self.n_test - len(test)):])
            train = rng.permutation(train)
            test = rng.permutation(test)
            yield train, test
    def __repr__(self):
        return ('%s(labels=%s, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.y,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))
    def __len__(self):
        # One train/test pair is yielded per re-shuffling iteration.
        return self.n_iter
class PredefinedSplit(_PartitionIterator):
    """Predefined split cross validation iterator
    Splits the data into training/test set folds according to a predefined
    scheme. Each sample can be assigned to at most one test set fold, as
    specified by the user through the ``test_fold`` parameter.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    test_fold : "array-like, shape (n_samples,)
        test_fold[i] gives the test set fold of sample i. A value of -1
        indicates that the corresponding sample is not part of any test set
        folds, but will instead always be put into the training fold.
    Examples
    --------
    >>> from sklearn.cross_validation import PredefinedSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
    >>> len(ps)
    2
    >>> print(ps)       # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    sklearn.cross_validation.PredefinedSplit(test_fold=[ 0  1 -1  1])
    >>> for train_index, test_index in ps:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2 3] TEST: [0]
    TRAIN: [0 2] TEST: [1 3]
    """
    def __init__(self, test_fold):
        super(PredefinedSplit, self).__init__(len(test_fold))
        # BUGFIX: `np.int` was a deprecated alias of the builtin `int` and was
        # removed in NumPy 1.24; using the builtin is equivalent and portable.
        self.test_fold = np.array(test_fold, dtype=int)
        self.test_fold = column_or_1d(self.test_fold)
        # A fold id of -1 marks samples that never appear in any test fold.
        self.unique_folds = np.unique(self.test_fold)
        self.unique_folds = self.unique_folds[self.unique_folds != -1]
    def _iter_test_indices(self):
        # Yield the test-index array of each predefined fold id in turn.
        for f in self.unique_folds:
            yield np.where(self.test_fold == f)[0]
    def __repr__(self):
        return '%s.%s(test_fold=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.test_fold)
    def __len__(self):
        # Number of train/test splits equals the number of distinct test folds.
        return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
    """Shuffle-Labels-Out cross-validation iterator
    Provides randomized train/test indices to split data according to a
    third-party provided label. This label information can be used to encode
    arbitrary domain specific stratifications of the samples as integers.
    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LabelShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique labels,
    whereas LabelShuffleSplit generates a user-determined number of random
    test splits, each with a user-determined fraction of unique labels.
    For example, a less computationally intensive alternative to
    ``LeavePLabelOut(labels, p=10)`` would be
    ``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
    not to samples, as in ShuffleSplit.
    .. versionadded:: 0.17
    Parameters
    ----------
    labels : array, [n_samples]
        Labels of samples
    n_iter : int (default 5)
        Number of re-shuffling and splitting iterations.
    test_size : float (default 0.2), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the test split. If
        int, represents the absolute number of test labels. If None,
        the value is automatically set to the complement of the train size.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the train split. If
        int, represents the absolute number of train labels. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """
    def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
                 random_state=None):
        # The base ShuffleSplit operates on the *unique labels* rather than on
        # the samples; label_indices maps each sample back to its label id so
        # that label-level splits can be expanded to sample-level ones.
        classes, label_indices = np.unique(labels, return_inverse=True)
        super(LabelShuffleSplit, self).__init__(
            len(classes),
            n_iter=n_iter,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)
        self.labels = labels
        self.classes = classes
        self.label_indices = label_indices
    def __repr__(self):
        return ('%s(labels=%s, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.labels,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))
    def __len__(self):
        return self.n_iter
    def _iter_indices(self):
        for label_train, label_test in super(LabelShuffleSplit,
                                             self)._iter_indices():
            # these are the indices of classes in the partition
            # invert them into data indices
            train = np.flatnonzero(np.in1d(self.label_indices, label_train))
            test = np.flatnonzero(np.in1d(self.label_indices, label_test))
            yield train, test
##############################################################################
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing."""
    # Only index fit parameters that are per-sample arrays aligned with X
    # (e.g. sample_weight); scalars and mismatched values pass through intact.
    if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
        # pass through: skip indexing
        return v
    if sp.issparse(v):
        # CSR supports efficient row indexing; other sparse formats may not.
        v = v.tocsr()
    return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate cross-validated estimates for each input data point
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    Returns
    -------
    preds : ndarray
        This is the result of calling 'predict'
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
                                                      train, test, verbose,
                                                      fit_params)
                            for train, test in cv)
    preds = [p for p, _ in preds_blocks]
    locs = np.concatenate([loc for _, loc in preds_blocks])
    # Each sample must land in exactly one test fold, otherwise a single
    # prediction per input sample cannot be assembled.
    if not _check_is_partition(locs, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')
    # Invert the test-fold ordering so predictions line up with input order.
    inv_locs = np.empty(len(locs), dtype=int)
    inv_locs[locs] = np.arange(len(locs))
    # Check for sparse predictions
    if sp.issparse(preds[0]):
        preds = sp.vstack(preds, format=preds[0].format)
    else:
        preds = np.concatenate(preds)
    return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit estimator and predict values for a given dataset split.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    Returns
    -------
    preds : sequence
        Result of calling 'estimator.predict'
    test : array-like
        This is the value of the test parameter
    """
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])
    X_train, y_train = _safe_split(estimator, X, y, train)
    # train is passed too so that precomputed kernel matrices are sliced into
    # the (test, train) rectangle expected at prediction time.
    X_test, _ = _safe_split(estimator, X, y, test, train)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    preds = estimator.predict(X_test)
    return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                              train, test, verbose, None,
                                              fit_params)
                      for train, test in cv)
    # _fit_and_score returns [test_score, n_test_samples, scoring_time] per
    # fold; keep only the test score of each fold.
    return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    parameters : dict or None
        Parameters to be set on the estimator.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    return_train_score : boolean, optional, default: False
        Compute and return score on training set.
    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.
    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.
    test_score : float
        Score on test set.
    n_test_samples : int
        Number of test samples.
    scoring_time : float
        Time spent for fitting and scoring in seconds.
    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    if verbose > 1:
        if parameters is None:
            msg = "no parameters to be set"
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    # Fit the estimator; fit failures are either re-raised or converted into
    # the numeric error_score, depending on the error_score argument.
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)"
                             )
    else:
        # Fit succeeded: score normally.
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)
    scoring_time = time.time() - start_time
    # verbose > 2 implies verbose > 1, so msg is always defined here.
    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels.

    For estimators with a callable kernel or with ``_pairwise`` set, X is a
    precomputed (kernel/affinity) matrix and must be sliced along *both*
    axes; otherwise samples are selected with plain row indexing.
    """
    if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
            and not isinstance(estimator.kernel, GPKernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")
    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        # Plain Python sequence: index element by element.
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                # Rectangular slice: rows = requested samples, columns =
                # training samples (used when scoring/predicting on test data).
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)
    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None
    return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
    """Input checker utility for building a CV in a user friendly way.
    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if classifier is True and ``y`` is binary or
        multiclass, :class:`StratifiedKFold` used. In all other cases,
        :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    X : array-like
        The data the cross-val object will be applied on.
    y : array-like
        The target variable for a supervised learning problem.
    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.
    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance, whatever
        the input type.
    """
    is_sparse = sp.issparse(X)
    if cv is None:
        cv = 3
    if isinstance(cv, numbers.Integral):
        if classifier:
            if type_of_target(y) in ['binary', 'multiclass']:
                cv = StratifiedKFold(y, cv)
            else:
                cv = KFold(_num_samples(y), cv)
        else:
            # NOTE(review): X=None with an integer cv and classifier=False
            # would fail on len(X) below -- callers appear to always supply
            # X in that case; confirm.
            if not is_sparse:
                n_samples = len(X)
            else:
                n_samples = X.shape[0]
            cv = KFold(n_samples, cv)
    # Non-integer cv objects (generators/iterables) are returned unchanged.
    return cv
def permutation_test_score(estimator, X, y, cv=None,
                           n_permutations=100, n_jobs=1, labels=None,
                           random_state=0, verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_permutations : integer, optional
        Number of times to permute ``y``.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        a same label.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.
    verbose : integer, optional
        The verbosity level.
    Returns
    -------
    score : float
        The true score without permuting targets.
    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.
    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value:  1 - p-value.
    Notes
    -----
    This function implements Test 1 in:
        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance.  The Journal of Machine Learning Research (2010)
        vol. 11
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state), cv,
            scorer)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # Add-one smoothing: the unpermuted score itself counts as one draw, so
    # the reported p-value can never be exactly zero.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))`` and application to input
    data into a single call for splitting (and optionally subsampling)
    data in a oneliner.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.
        .. versionadded:: 0.16
            preserves input type instead of always casting to numpy array.
    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the labels array.
        .. versionadded:: 0.17
           *stratify* splitting
    Returns
    -------
    splitting : list, length = 2 * len(arrays),
        List containing train-test split of inputs.
        .. versionadded:: 0.16
            Output type is the same as the input type.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_validation import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]
    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    # Keyword-only options are popped one by one so that anything left over
    # can be reported as invalid.
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))
    if test_size is None and train_size is None:
        test_size = 0.25
    arrays = indexable(*arrays)
    if stratify is not None:
        cv = StratifiedShuffleSplit(stratify, test_size=test_size,
                                    train_size=train_size,
                                    random_state=random_state)
    else:
        n_samples = _num_samples(arrays[0])
        cv = ShuffleSplit(n_samples, test_size=test_size,
                          train_size=train_size,
                          random_state=random_state)
    # Only the first shuffle iteration of the CV object is used.
    train, test = next(iter(cv))
    # Interleave the (train, test) selections of every input array.
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a pb with nosetests
| bsd-3-clause |
janhahne/nest-simulator | pynest/nest/tests/test_spatial/test_plotting.py | 12 | 5748 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic spatial plotting functions.
"""
import unittest
import nest
import numpy as np
# Probe whether plotting is possible by actually opening and closing a figure
# (matplotlib may be missing, or there may be no display, e.g. on headless CI).
try:
    import matplotlib.pyplot as plt
    tmp_fig = plt.figure()  # make sure we can open a window; DISPLAY may not be set
    plt.close(tmp_fig)
    PLOTTING_POSSIBLE = True
# BUGFIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt; catch
# Exception instead so the probe only masks genuine matplotlib/display errors.
except Exception:
    PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE,
                 'Plotting impossible because matplotlib or display missing')
class PlottingTestCase(unittest.TestCase):
    """Tests that NEST spatial plotting helpers draw the expected artists."""
    def test_PlotLayer(self):
        """Test plotting layer."""
        nest.ResetKernel()
        l = nest.Create('iaf_psc_alpha',
                        positions=nest.spatial.grid(shape=[3, 3],
                                                    extent=[2., 2.],
                                                    edge_wrap=True))
        nest.PlotLayer(l)
        # The scatter collection drawn last must coincide with the node positions.
        plotted_datapoints = plt.gca().collections[-1].get_offsets().data
        reference_datapoints = nest.GetPosition(l)
        self.assertTrue(np.allclose(plotted_datapoints, reference_datapoints))
    def test_PlotTargets(self):
        """Test plotting targets."""
        delta = 0.05
        mask = {'rectangular': {'lower_left': [-delta, -2/3 - delta], 'upper_right': [2/3 + delta, delta]}}
        cdict = {'rule': 'pairwise_bernoulli', 'p': 1.,
                 'mask': mask}
        sdict = {'synapse_model': 'stdp_synapse'}
        nest.ResetKernel()
        l = nest.Create('iaf_psc_alpha',
                        positions=nest.spatial.grid(shape=[3, 3],
                                                    extent=[2., 2.],
                                                    edge_wrap=True))
        # connect l -> l
        nest.Connect(l, l, cdict, sdict)
        ctr = nest.FindCenterElement(l)
        fig = nest.PlotTargets(ctr, l)
        fig.gca().set_title('Plain call')
        plotted_datapoints = plt.gca().collections[0].get_offsets().data
        eps = 0.01
        pos = np.array(nest.GetPosition(l))
        # Reference targets: nodes inside the rectangular mask, i.e. x > 0
        # and y < 0 within tolerance eps.
        pos_xmask = pos[np.where(pos[:, 0] > -eps)]
        reference_datapoints = pos_xmask[np.where(pos_xmask[:, 1] < eps)][::-1]
        self.assertTrue(np.array_equal(np.sort(plotted_datapoints, axis=0), np.sort(reference_datapoints, axis=0)))
        fig = nest.PlotTargets(ctr, l, mask=mask)
        ax = fig.gca()
        ax.set_title('Call with mask')
        # The mask outline must have been drawn as at least one patch.
        self.assertGreaterEqual(len(ax.patches), 1)
    def test_plot_probability_kernel(self):
        """Plot parameter probability"""
        nest.ResetKernel()
        plot_shape = [10, 10]
        plot_edges = [-0.5, 0.5, -0.5, 0.5]
        def probability_calculation(distance):
            # Linear distance-dependent connection probability.
            return 1 - 1.5*distance
        l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
        source = l[25]
        source_pos = np.array(nest.GetPosition(source))
        source_x, source_y = source_pos
        # Calculate reference values
        ref_probability = np.zeros(plot_shape[::-1])
        for i, x in enumerate(np.linspace(plot_edges[0], plot_edges[1], plot_shape[0])):
            positions = np.array([[x, y] for y in np.linspace(plot_edges[2], plot_edges[3], plot_shape[1])])
            ref_distances = np.sqrt((positions[:, 0] - source_x)**2 + (positions[:, 1] - source_y)**2)
            values = probability_calculation(ref_distances)
            # Probabilities are clipped to the [0, 1] interval.
            ref_probability[:, i] = np.maximum(np.minimum(np.array(values), 1.0), 0.0)
        # Create the parameter
        parameter = probability_calculation(nest.spatial.distance)
        fig, ax = plt.subplots()
        nest.PlotProbabilityParameter(source, parameter, ax=ax, shape=plot_shape, edges=plot_edges)
        self.assertEqual(len(ax.images), 1)
        img = ax.images[0]
        img_data = img.get_array().data
        self.assertTrue(np.array_equal(img_data, ref_probability))
    def test_plot_probability_kernel_with_mask(self):
        """Plot parameter probability with mask"""
        nest.ResetKernel()
        plot_shape = [10, 10]
        plot_edges = [-0.5, 0.5, -0.5, 0.5]
        l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
        parameter = 1 - 1.5*nest.spatial.distance
        source = l[25]
        # One subplot per supported mask geometry.
        masks = [{'circular': {'radius': 0.4}},
                 {'doughnut': {'inner_radius': 0.2, 'outer_radius': 0.45}},
                 {'rectangular': {'lower_left': [-.3, -.3], 'upper_right': [0.3, 0.3]}},
                 {'elliptical': {'major_axis': 0.8, 'minor_axis': 0.4}}]
        fig, axs = plt.subplots(2, 2)
        for mask, ax in zip(masks, axs.flatten()):
            nest.PlotProbabilityParameter(source, parameter, mask=mask, ax=ax, shape=plot_shape, edges=plot_edges)
            # Each axis must contain the probability image and the mask patch.
            self.assertEqual(len(ax.images), 1)
            self.assertGreaterEqual(len(ax.patches), 1)
def suite():
    """Collect every 'test*' case from PlottingTestCase into a test suite."""
    return unittest.makeSuite(PlottingTestCase, 'test')
if __name__ == "__main__":
    # Run the plotting test suite with verbose output, then display any
    # figures the tests produced.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
    plt.show()
| gpl-2.0 |
RRCKI/pilot | ATLASSiteInformation.py | 1 | 42230 | # Class definition:
# ATLASSiteInformation
# This class is the ATLAS site information class inheriting from SiteInformation
# Instances are generated with SiteInformationFactory via pUtil::getSiteInformation()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# import relevant python/pilot modules
import os
import sys, httplib, cgi, urllib
import commands
import SiteMover
from SiteInformation import SiteInformation # Main site information class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import getExtension # Used to determine file type of Tier-1 info file
from PilotErrors import PilotErrors # Error codes
class ATLASSiteInformation(SiteInformation):
# private data members
__experiment = "ATLAS"
__instance = None
__error = PilotErrors() # PilotErrors object
__securityKeys = {}
# Required methods
def __init__(self):
    """ Default initialization """
    # Intentionally empty: all state lives in class-level members and the
    # singleton instance is handled in __new__().
    pass
def __new__(cls, *args, **kwargs):
    """ Override the __new__ method to make the class a singleton """
    # Create the instance only on the first call; every later instantiation
    # returns the cached object.
    if not cls.__instance:
        cls.__instance = super(ATLASSiteInformation, cls).__new__(cls, *args, **kwargs)
    return cls.__instance
def getExperiment(self):
    """ Return the name of the experiment this site information describes """
    experiment_name = self.__experiment
    return experiment_name
def isTier1(self, sitename):
    """ Is the given site a Tier-1? """
    # E.g. on a Tier-1 site, the alternative stage-out algorithm should not be used
    # Note: sitename is PanDA sitename, not DQ2 sitename
    return any(sitename in self.getTier1List(cloud) for cloud in self.getCloudList())
def isTier2(self, sitename):
    """ Is the given site a Tier-2? """
    # Logic: a site is a Tier-2 exactly when it is neither a Tier-1 nor a Tier-3
    is_t1_or_t3 = self.isTier1(sitename) or self.isTier3()
    return not is_t1_or_t3
def isTier3(self):
    """ Is the given site a Tier-3? """
    # Note: defined by DB; a Tier-3 site advertises a purely local ddm
    return readpar('ddm') == "local"
def getCloudList(self):
    """ Return a list of all clouds """
    # The cloud names are the keys of the Tier-1 info mapping
    return self.setTier1Info().keys()
def setTier1Info(self):
""" Set the Tier-1 information """
tier1 = {"CA": ["TRIUMF", ""],
"CERN": ["CERN-PROD", ""],
"DE": ["FZK-LCG2", ""],
"ES": ["pic", ""],
"FR": ["IN2P3-CC", ""],
"IT": ["INFN-T1", ""],
"ND": ["ARC", ""],
"NL": ["SARA-MATRIX", ""],
"OSG": ["BNL_CVMFS_1", ""],
"RU": ["RRC-KI-T1", ""],
"TW": ["Taiwan-LCG2", ""],
"UK": ["RAL-LCG2", ""],
"US": ["BNL_PROD", "BNL_PROD-condor"]
}
return tier1
def getTier1Name(self, cloud):
    """ Return the site name of the Tier-1 in the given cloud """
    site_and_queue = self.getTier1List(cloud)
    return site_and_queue[0]
def getTier1List(self, cloud):
    """ Return the [site, queue] pair for the Tier-1 in the given cloud """
    # Cloud : PanDA site, queue
    return self.setTier1Info()[cloud]
def getTier1InfoFilename(self):
    """ Build the full path of the local Tier-1 info file """
    # The extension (json/pickle) depends on the python version, see getExtension()
    filename = "Tier-1_info.%s" % (getExtension())
    return "%s/%s" % (os.environ['PilotHomeDir'], filename)
def downloadTier1Info(self):
    """ Download the Tier-1 info file from AGIS (unless already present)

    Returns 0 on success, -1 if the curl command could not be executed
    or exited with a non-zero status.
    """
    ec = 0
    path = self.getTier1InfoFilename()
    filename = os.path.basename(path)
    dummy, extension = os.path.splitext(filename)
    # The AGIS query format depends on the wanted file type
    if extension == ".json":
        _cmd = "?json"
    else:
        _cmd = "?preset=ssbpilot"
    url = "http://atlas-agis-api.cern.ch/request/site/query/list/%s" % (_cmd)
    cmd = 'curl --connect-timeout 20 --max-time 120 -sS "%s" > %s' % (url, path)
    if os.path.exists(path):
        tolog("File %s already available" % (path))
    else:
        tolog("Will download file: %s" % (filename))
        try:
            tolog("Executing command: %s" % (cmd))
            ret, output = commands.getstatusoutput(cmd)
        except Exception as e:
            tolog("!!WARNING!!1992!! Could not download file: %s" % (e))
            ec = -1
        else:
            # Bug fix: the exit status used to be ignored, so a failed curl
            # was logged as "Done" and reported as success
            if ret != 0:
                tolog("!!WARNING!!1992!! Download command failed: %d, %s" % (ret, output))
                ec = -1
            else:
                tolog("Done")
    return ec
def getTier1Queue(self, cloud):
    """ Return the schedconfig queue name of the Tier-1 in the given cloud """
    # Download the entire set of queuedata, then map the Tier-1 PanDA site
    # name (e.g. "BNL_PROD") to its queue name (e.g. "BNL_PROD-condor")
    all_queuedata_dict = self.getAllQueuedata()
    t1_site = self.getTier1Name(cloud)
    return self.getTier1Queuename(t1_site, all_queuedata_dict)
def getTier1Queue2(self, cloud):
    """ Download the queuedata for the Tier-1 in the corresponding cloud and get the queue name

    Returns the PanDA queue name, or "" if the download failed or no
    Tier-1 entry was found for the cloud.
    """
    queuename = ""
    path = self.getTier1InfoFilename()
    ec = self.downloadTier1Info()
    if ec == 0:
        # Process the downloaded T-1 info; the file format (json/pickle)
        # follows getExtension()
        f = open(path, 'r')
        if getExtension() == "json":
            from json import loads
            data = loads(f.read())
        else:
            from pickle import load
            data = load(f)
        f.close()
        # Extract the relevant queue info for the given cloud
        T1_info = [x for x in data if x['cloud']==cloud]
        # finally get the queue name
        if T1_info != []:
            info = T1_info[0]
            if info.has_key('PanDAQueue'):
                queuename = info['PanDAQueue']
            else:
                tolog("!!WARNING!!1222!! Returned Tier-1 info object does not have key PanDAQueue: %s" % str(info))
        else:
            tolog("!!WARNING!!1223!! Found no Tier-1 info for cloud %s" % (cloud))
    return queuename
def getAllQueuedataFilename(self):
    """ Return the path of the local schedconfig dump file (JSON) """
    filename = "queuenames.json"
    return os.path.join(os.getcwd(), filename)
def downloadAllQueuenames(self):
    """ Download the entire schedconfig from AGIS

    Returns 0 on success (or if the dump is already present), non-zero if
    JSON is unavailable or the curl command failed.
    """
    ec = 0
    # Do not even bother to download anything if JSON is not supported
    # (the dump is only published in JSON format)
    try:
        from json import load
    except:
        tolog("!!WARNING!!1231!! JSON is not available, cannot download schedconfig dump")
        ec = -1
    else:
        # url = "http://atlas-agis-api-dev.cern.ch/request/pandaqueue/query/list/?json"
        url = "http://atlas-agis-api.cern.ch/request/pandaqueue/query/list/?json&preset=schedconf.all&tier_level=1&type=production"
        schedconfig_dump = self.getAllQueuedataFilename()
        cmd = "curl \'%s\' >%s" % (url, schedconfig_dump)
        # Reuse a previously downloaded dump if one exists
        if os.path.exists(schedconfig_dump):
            tolog("File %s already downloaded" % (schedconfig_dump))
        else:
            tolog("Executing command: %s" % (cmd))
            ec, out = commands.getstatusoutput(cmd)
            if ec != 0:
                tolog("!!WARNING!!1234!! Failed to download %s: %d, %s" % (schedconfig_dump, ec, out))
            else:
                tolog("Downloaded schedconfig dump")
    return ec
def getAllQueuedata(self):
    """ Get the dictionary containing all the queuedata (for all sites)

    Returns {} if the schedconfig dump could not be downloaded or opened.
    """
    all_queuedata_dict = {}
    # Download the entire schedconfig
    ec = self.downloadAllQueuenames()
    if ec == 0:
        # Parse the schedconfig dump
        schedconfig_dump = self.getAllQueuedataFilename()
        try:
            f = open(schedconfig_dump)
        except Exception, e:
            # Best effort: an unreadable dump yields an empty dictionary
            tolog("!!WARNING!!1001!! Could not open file: %s, %s" % (schedconfig_dump, e))
        else:
            # Note: json is required since the queuedata dump is only available in json format
            from json import load
            # Load the dictionary
            all_queuedata_dict = load(f)
            # Done with the file
            f.close()
    return all_queuedata_dict
def getTier1Queuename(self, pandaSiteID, all_queuedata_dict):
    """ Find the Tier-1 queue name from the schedconfig dump

    pandaSiteID: PanDA resource name of the Tier-1 (e.g. "BNL_PROD")
    all_queuedata_dict: { queuename: queuedata } as returned by getAllQueuedata()
    Returns the first queue whose panda_resource matches, or "" if none does.
    """
    t1_queuename = ""
    # Loop over all schedconfig entries until the panda_resource matches
    for queuename in all_queuedata_dict.keys():
        # 'in' replaces dict.has_key(), which was removed in python 3
        if "panda_resource" in all_queuedata_dict[queuename]:
            if all_queuedata_dict[queuename]["panda_resource"] == pandaSiteID:
                t1_queuename = queuename
                break
    return t1_queuename
def allowAlternativeStageOut(self, flag=False):
    """ Is alternative stage-out allowed? """
    # E.g. if stage-out to primary SE (at Tier-2) fails repeatedly, is it allowed
    # to attempt stage-out to secondary SE (at Tier-1)?
    # For ATLAS, flag=isAnalysisJob(). Alt stage-out is currently disabled for
    # user jobs, so do not allow alt stage-out to be forced.
    allowed = "allow_alt_stageout" in readpar('catchall')
    return allowed and not flag
def forceAlternativeStageOut(self, flag=False):
    """ Should stage-out be forced to use the alternative SE?

    See allowAlternativeStageOut(). For ATLAS, flag=isAnalysisJob();
    alt stage-out is currently disabled for user jobs, so forcing is
    never allowed for them.
    """
    # Note: a stray debug statement, tolog("ATLAS"), was removed here
    return "force_alt_stageout" in readpar('catchall') and not flag
def getProperPaths(self, error, analyJob, token, prodSourceLabel, dsname, filename, **pdict):
    """ Get proper paths (SURL and LFC paths)

    Returns (ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl);
    ec is non-zero on failure, in which case tracer_error names the step
    that failed.
    """
    ec = 0
    pilotErrorDiag = ""
    tracer_error = ""
    dst_gpfn = ""
    lfcdir = ""
    surl = ""
    # Optional keyword arguments
    alt = pdict.get('alt', False)
    scope = pdict.get('scope', None)
    # Get the proper endpoint
    sitemover = SiteMover.SiteMover()
    se = sitemover.getProperSE(token, alt=alt)
    # For production jobs, the SE path is stored in seprodpath
    # For analysis jobs, the SE path is stored in sepath
    # e.g. /dpm/grid.sinica.edu.tw/home/atlas/atlasscratchdisk/
    destination = sitemover.getPreDestination(analyJob, token, prodSourceLabel, alt=alt)
    if destination == '':
        pilotErrorDiag = "put_data destination path in SE not defined"
        tolog('!!WARNING!!2990!! %s' % (pilotErrorDiag))
        tracer_error = 'PUT_DEST_PATH_UNDEF'
        ec = error.ERR_STAGEOUTFAILED
        return ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl
    else:
        tolog("Going to store job output at: %s" % (destination))
    # rucio path: SE + destination + SiteMover.getPathFromScope(scope, lfn)
    # Get the LFC path, e.g. /grid/atlas/users/pathena
    lfcpath, pilotErrorDiag = sitemover.getLFCPath(analyJob, alt=alt)
    if lfcpath == "":
        tracer_error = 'LFC_PATH_EMPTY'
        ec = error.ERR_STAGEOUTFAILED
        return ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl
    tolog("LFC path = %s" % (lfcpath))
    # Build the final destination and LFC directory from the dataset name
    ec, pilotErrorDiag, dst_gpfn, lfcdir = sitemover.getFinalLCGPaths(analyJob, destination, dsname, filename, lfcpath, token, prodSourceLabel, scope=scope, alt=alt)
    if ec != 0:
        tracer_error = 'UNKNOWN_DSN_FORMAT'
        return ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl
    # Define the SURL, e.g.
    # srm://f-dpm001.grid.sinica.edu.tw:8446/srm/managerv2?SFN=/dpm/grid.sinica.edu.tw/home/atlas/atlasscratchdisk/rucio/data12_8TeV/55/bc/NTUP_SUSYSKIM.01161650._000003.root.1
    if "/rucio" in destination:
        # Rucio-style destination: let the site mover build the full path
        surl = sitemover.getFullPath(scope, token, filename, analyJob, prodSourceLabel, alt=alt)
    else:
        surl = "%s%s" % (se, dst_gpfn)
    # Correct the SURL which might start with something like 'token:ATLASMCTAPE:srm://srm-atlas.cern.ch:8443/srm/man/..'
    # If so, remove the space token before the srm info
    if surl.startswith('token'):
        tolog("Removing space token part from SURL")
        dummy, surl = sitemover.extractSE(surl)
    tolog("SURL = %s" % (surl))
    tolog("dst_gpfn = %s" % (dst_gpfn))
    tolog("lfcdir = %s" % (lfcdir))
    return ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl
def verifyRucioPath(self, spath, seprodpath='seprodpath'):
    """ Make sure that the rucio path in se[prod]path is correctly formatted

    spath: current value of the queuedata field named by seprodpath.
    A correctly formatted rucio se[prod]path should end with /rucio;
    malformed values ('...rucio' or '...rucio/') are corrected and written
    back to the queuedata via replaceQueuedataField().
    """
    # A correctly formatted rucio se[prod]path should end with /rucio
    if "rucio" in spath:
        if spath.endswith('rucio'):
            if spath.endswith('/rucio'):
                tolog("Confirmed correctly formatted rucio %s" % (seprodpath))
            else:
                # e.g. '...diskrucio' -> '...disk/rucio'
                tolog("!!WARNING!!1234!! rucio path in %s is not correctly formatted: %s" % (seprodpath, spath))
                # NOTE(review): str.replace() rewrites the first occurrence of
                # 'rucio', which may not be the trailing one if 'rucio' appears
                # earlier in the path - TODO confirm intended
                spath = spath.replace('rucio','/rucio')
                ec = self.replaceQueuedataField(seprodpath, spath)
                tolog("Updated %s to: %s" % (seprodpath, spath))
        elif spath.endswith('rucio/'):
            # Trailing slash variants: '/rucio/' -> '/rucio', 'rucio/' -> '/rucio'
            tolog("!!WARNING!!1234!! rucio path in %s is not correctly formatted: %s" % (seprodpath, spath))
            if spath.endswith('/rucio/'):
                spath = spath.replace('rucio/','rucio')
            else:
                spath = spath.replace('rucio/','/rucio')
            ec = self.replaceQueuedataField(seprodpath, spath)
            tolog("Updated %s to: %s" % (seprodpath, spath))
def postProcessQueuedata(self, queuename, pshttpurl, thisSite, _jobrec, force_devpilot):
    """ Update queuedata fields if necessary

    Applies development/test-site overrides, aborts if the queue is set
    offline, honours the 'retry' (job recovery) queuedata flag, corrects
    malformed rucio paths and evaluates the queuedata.

    Returns (ec, thisSite, _jobrec); ec = -1 (with None, None) if the site
    is offline, otherwise 0.
    """
    # NOTE: a large amount of dead, commented-out per-site debugging
    # overrides was removed from this method; see the repository history
    # Dev pilots and CERNVM are always treated as online
    if 'pandadev' in pshttpurl or force_devpilot or thisSite.sitename == "CERNVM":
        ec = self.replaceQueuedataField("status", "online")
    # Test-queue overrides
    if thisSite.sitename == "UTA_PAUL_TEST" or thisSite.sitename == "ANALY_UTA_PAUL_TEST":
        ec = self.replaceQueuedataField("status", "online")
        ec = self.replaceQueuedataField("objectstore", "eventservice^s3://cephgw.usatlas.bnl.gov:8443//atlas_pilot_bucket/eventservice|logs^s3://cephgw.usatlas.bnl.gov:8443//atlas_pilot_bucket/logs|http^http://cephgw02.usatlas.bnl.gov:8443//atlas_pilot_bucket/logs")
        ec = self.replaceQueuedataField("catchall", "log_to_objectstore stdout_to_text_indexer")
        ec = self.replaceQueuedataField("allowfax", "True")
        ec = self.replaceQueuedataField("timefloor", "0")
        ec = self.replaceQueuedataField("faxredirector", "root://glrd.usatlas.org/")
        ec = self.replaceQueuedataField("appdir", "/cvmfs/atlas.cern.ch/repo/sw|nightlies^/cvmfs/atlas-nightlies.cern.ch/repo/sw/nightlies")
        # Allow the environment to override the copy tools
        if os.environ.get("COPYTOOL"):
            ec = self.replaceQueuedataField("copytool", os.environ.get("COPYTOOL"))
        if os.environ.get("COPYTOOLIN"):
            ec = self.replaceQueuedataField("copytoolin", os.environ.get("COPYTOOLIN"))
    # Abort the pilot if the queue has been set offline
    _status = self.readpar('status')
    if _status is not None and _status != "":
        if _status.upper() == "OFFLINE":
            tolog("Site %s is currently in %s mode - aborting pilot" % (thisSite.sitename, _status.lower()))
            return -1, None, None
        else:
            tolog("Site %s is currently in %s mode" % (thisSite.sitename, _status.lower()))
    # Override pilot run options
    temp_jobrec = self.readpar('retry')
    if temp_jobrec.upper() == "TRUE":
        tolog("Job recovery turned on")
        _jobrec = True
    elif temp_jobrec.upper() == "FALSE":
        tolog("Job recovery turned off")
        _jobrec = False
    else:
        tolog("Job recovery variable (retry) not set")
    # Make sure that se[prod]path does not contain a malformed /rucio string (rucio/)
    # if so, correct it
    self.verifyRucioPath(readpar('sepath'), seprodpath='sepath')
    self.verifyRucioPath(readpar('seprodpath'), seprodpath='seprodpath')
    # Evaluate the queuedata if needed
    self.evaluateQueuedata()
    # Set pilot variables in case they have not been set by the pilot launcher
    thisSite = self.setUnsetVars(thisSite)
    return 0, thisSite, _jobrec
def getQueuedata(self, queuename, forceDownload=False, alt=False, url='http://pandaserver.cern.ch'):
    """ Download the queuedata if not already downloaded

    Returns (ec, hasQueuedata); ec is an appropriate PilotErrors code when
    the download failed or the queuedata is invalid.
    """
    ec = 0
    hasQueuedata = False
    # Guard clause: without a queue name nothing can be downloaded
    if queuename == "":
        tolog("WARNING: queuename not set (queuedata will not be downloaded and symbols not evaluated)")
        return ec, hasQueuedata
    ec, hasQueuedata = super(ATLASSiteInformation, self).getQueuedata(queuename, forceDownload=forceDownload, alt=alt, url=url)
    if ec != 0:
        tolog("!!FAILED!!1999!! getQueuedata failed: %d" % (ec))
        ec = self.__error.ERR_QUEUEDATA
    if not hasQueuedata:
        tolog("!!FAILED!!1999!! Found no valid queuedata - aborting pilot")
        ec = self.__error.ERR_QUEUEDATANOTOK
    else:
        tolog("curl command returned valid queuedata")
    return ec, hasQueuedata
def getSpecialAppdir(self, value):
    """ Get a special appdir from the environment variable named 'value'

    Returns (ec, appdir); ec is non-zero if the variable is unset/empty
    or the queuedata 'appdir' field could not be updated.
    """
    ec = 0
    _appdir = ""
    # does the environment variable exist?
    # ('in' replaces dict.has_key(), which was removed in python 3)
    if value in os.environ:
        # expand the value in case it contains further environmental variables
        _appdir = os.path.expandvars(os.environ[value])
        tolog("Environment has variable $%s = %s" % (value, _appdir))
        if _appdir == "":
            tolog("!!WARNING!!2999!! Environmental variable not set: %s" % (value))
            ec = self.__error.ERR_SETUPFAILURE
        else:
            # store the evaluated symbol in appdir
            if self.replaceQueuedataField('appdir', _appdir, verbose=False):
                tolog("Updated field %s in queuedata: %s" % ('appdir', _appdir))
            else:
                tolog("!!WARNING!!2222!! Queuedata field could not be updated, cannot continue")
                ec = self.__error.ERR_SETUPFAILURE
    else:
        tolog("!!WARNING!!2220!! Environmental variable %s is not defined" % (value))
    return ec, _appdir
def extractAppdir(self, appdir, processingType, homePackage):
    """ extract and (re-)confirm appdir from possibly encoded schedconfig.appdir

    e.g. for CERN:
      processingType = unvalid
      schedconfig.appdir = /afs/cern.ch/atlas/software/releases|release^/afs/cern.ch/atlas/software/releases|unvalid^/afs/cern.ch/atlas/software/unvalidated/caches
      -> appdir = /afs/cern.ch/atlas/software/unvalidated/caches
    If processingType does not match anything, the default first entry is used.
    NOTE: this function can only be called after a job has been downloaded
    since processingType is unknown until then.
    Returns (ec, appdir); ec is ERR_NOSOFTWAREDIR if the directory is
    missing or unset.
    """
    ec = 0
    tolog("Extracting appdir (ATLAS: current value=%s)" % (appdir))
    # override processingType for analysis jobs that use nightlies
    if "rel_" in homePackage:
        tolog("Temporarily modifying processingType from %s to nightlies" % (processingType))
        processingType = "nightlies"
        value = 'VO_ATLAS_NIGHTLIES_DIR'
        # prefer the dedicated nightlies dir if the environment defines it
        if os.environ.has_key(value):
            ec, _appdir = self.getSpecialAppdir(value)
            if ec == 0 and _appdir != "":
                return ec, _appdir
    elif "AtlasP1HLT" in homePackage or "AtlasHLT" in homePackage:
        value = 'VO_ATLAS_RELEASE_DIR'
        tolog("Encountered HLT homepackage: %s, will look for a set $%s" % (homePackage, value))
        # does a HLT directory exist?
        if os.environ.has_key(value):
            ec, _appdir = self.getSpecialAppdir(value)
            if ec == 0, and _appdir != "":
                return ec, _appdir
        else:
            tolog('$%s is not set' % (value))
    _appdir = appdir
    if "|" in _appdir and "^" in _appdir:
        # extract appdir by matching with processingType
        appdir_split = _appdir.split("|")
        appdir_default = appdir_split[0]
        # loop over all possible appdirs
        sub_appdir = ""
        for i in range(1, len(appdir_split)):
            # extract the processingType and sub appdir
            sub_appdir_split = appdir_split[i].split("^")
            if processingType == sub_appdir_split[0]:
                # found match
                sub_appdir = sub_appdir_split[1]
                break
        if sub_appdir == "":
            _appdir = appdir_default
            tolog("Using default appdir: %s (processingType = \'%s\')" % (_appdir, processingType))
        else:
            _appdir = sub_appdir
            tolog("Matched processingType %s to appdir %s" % (processingType, _appdir))
    else:
        # check for empty appdir's on LCG
        if _appdir == "":
            if os.environ.has_key("VO_ATLAS_SW_DIR"):
                _appdir = os.environ["VO_ATLAS_SW_DIR"]
                tolog("Set site.appdir to %s" % (_appdir))
        else:
            tolog("Got plain appdir: %s" % (_appdir))
    # verify the existence of appdir
    if os.path.exists(_appdir):
        tolog("Software directory %s exists" % (_appdir))
        # force queuedata update
        _ec = self.replaceQueuedataField("appdir", _appdir)
        del _ec
    else:
        if _appdir != "":
            tolog("!!FAILED!!1999!! Software directory does not exist: %s" % (_appdir))
        else:
            tolog("!!FAILED!!1999!! Software directory (appdir) is not set")
        ec = self.__error.ERR_NOSOFTWAREDIR
    return ec, _appdir
def getFileSystemRootPath(self):
""" Return the root path of the local file system """
# Returns "/cvmfs" or "/(some path)/cvmfs" in case the expected file system root path is not
# where it usually is (e.g. on an HPC). See example implementation in self.getLocalROOTSetup()
if os.environ.has_key('ATLAS_SW_BASE'):
path = os.environ['ATLAS_SW_BASE']
else:
path = '/cvmfs'
return path
def getLocalROOTSetup(self):
""" Build command to prepend the xrdcp command [xrdcp will in general not be known in a given site] """
# cmd = 'export ATLAS_LOCAL_ROOT_BASE=%s/atlas.cern.ch/repo/ATLASLocalRootBase; ' % (self.getFileSystemRootPath())
# cmd += 'source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet; '
# cmd += 'source ${ATLAS_LOCAL_ROOT_BASE}/packageSetups/atlasLocalROOTSetup.sh --rootVersion ${rootVersionVal} --skipConfirm; '
cmd = 'source %s/atlas.cern.ch/repo/sw/local/xrootdsetup.sh' % (self.getFileSystemRootPath())
return cmd
def getLocalEMISetup(self):
""" Return the path for the local EMI setup """
cmd = 'export ATLAS_LOCAL_ROOT_BASE=%s/atlas.cern.ch/repo/ATLASLocalRootBase; ' % (self.getFileSystemRootPath())
cmd += 'source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet; '
cmd += 'source ${ATLAS_LOCAL_ROOT_BASE}/packageSetups/atlasLocalEmiSetup.sh --force'
#cmd += 'source ${ATLAS_LOCAL_ROOT_BASE}/x86_64/emi/current/setup.sh'
return cmd
    # Required for S3 objectstore
    def getSecurityKey(self, privateKeyName, publicKeyName):
        """ Return the key pair for the given key names.

        Looks the pair up in the in-memory cache first; on a miss it POSTs
        to the PanDA server's getKeyPair endpoint using the local SSL
        certificate for both key and cert, caches the result, and returns
        a dict with "publicKey" and "privateKey" entries. On any failure
        both entries are None.
        NOTE: uses Python 2-only modules (httplib, urllib.urlencode,
        cgi.parse_qs) -- not Python 3 compatible as written.
        """
        # cache key: "<private>_<public>"
        keyName = privateKeyName + "_" + publicKeyName
        if keyName in self.__securityKeys.keys():
            return self.__securityKeys[keyName]
        else:
            try:
                #import environment
                #env = environment.set_environment()
                # same file is used as SSL key and certificate
                sslCert = self.getSSLCertificate()
                sslKey = sslCert
                node={}
                node['privateKeyName'] = privateKeyName
                node['publicKeyName'] = publicKeyName
                #host = '%s:%s' % (env['pshttpurl'], str(env['psport'])) # The key pair is not set on other panda server
                # hard-coded: key pairs are only served by this PanDA instance
                host = 'pandaserver.cern.ch:25443'
                path = '/server/panda/getKeyPair'
                conn = httplib.HTTPSConnection(host, key_file=sslKey, cert_file=sslCert, timeout=120)
                conn.request('POST', path, urllib.urlencode(node))
                resp = conn.getresponse()
                data = resp.read()
                conn.close()
                # response is urlencoded; StatusCode "0" means success
                dic = cgi.parse_qs(data)
                if dic["StatusCode"][0] == "0":
                    self.__securityKeys[keyName] = {"publicKey": dic["publicKey"][0], "privateKey": dic["privateKey"][0]}
                    return self.__securityKeys[keyName]
                else:
                    tolog("!!WARNING!!4444!! Failed to get key from PanDA server:")
                    tolog("data = %s" % str(data))
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt/
                # SystemExit -- deliberate best-effort here, but worth confirming
                _type, value, traceBack = sys.exc_info()
                tolog("!!WARNING!!4445!! Failed to getKeyPair for (%s, %s)" % (privateKeyName, publicKeyName))
                tolog("ERROR: %s %s" % (_type, value))
        return {"publicKey": None, "privateKey": None}
# Manual smoke test: query the Tier-1 queue for CERN and fetch an
# objectstore key pair. Requires a working pilot environment (Python 2:
# note the print statement below).
if __name__ == "__main__":
    os.environ['PilotHomeDir'] = os.getcwd()
    si = ATLASSiteInformation()
    tolog("Experiment: %s" % (si.getExperiment()))
    cloud = "CERN"
    queuename = si.getTier1Queue(cloud)
    if queuename != "":
        tolog("Cloud %s has Tier-1 queue %s" % (cloud, queuename))
    else:
        tolog("Failed to find a Tier-1 queue name for cloud %s" % (cloud))
    keyPair = si.getSecurityKey('BNL_ObjectStoreKey', 'BNL_ObjectStoreKey.pub')
    print keyPair
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/transoffset.py | 13 | 1666 | #!/usr/bin/env python
'''
This illustrates the use of transforms.offset_copy to
make a transform that positions a drawing element such as
a text string at a specified offset in screen coordinates
(dots or inches) relative to a location given in any
coordinates.
Every Artist--the mpl class from which classes such as
Text and Line are derived--has a transform that can be
set when the Artist is created, such as by the corresponding
pylab command. By default this is usually the Axes.transData
transform, going from data units to screen dots. We can
use the offset_copy function to make a modified copy of
this transform, where the modification consists of an
offset.
'''
import pylab as P
from matplotlib.transforms import offset_copy
# Sample data: seven points on a parabola.
X = P.arange(7)
Y = X**2
fig = P.figure(figsize=(5,10))
ax = P.subplot(2,1,1)
# If we want the same offset for each text instance,
# we only need to make one transform. To get the
# transform argument to offset_copy, we need to make the axes
# first; the subplot command above is one way to do this.
transOffset = offset_copy(ax.transData, fig=fig,
                          x = 0.05, y=0.10, units='inches')
# Label each point 0.05in right / 0.10in above its data location.
for x, y in zip(X, Y):
    P.plot((x,),(y,), 'ro')
    P.text(x, y, '%d, %d' % (int(x),int(y)), transform=transOffset)
# offset_copy works for polar plots also.
ax = P.subplot(2,1,2, polar=True)
# Here the offset is 6 screen dots straight up from each point.
transOffset = offset_copy(ax.transData, fig=fig, y = 6, units='dots')
for x, y in zip(X, Y):
    P.polar((x,),(y,), 'ro')
    P.text(x, y, '%d, %d' % (int(x),int(y)),
           transform=transOffset,
           horizontalalignment='center',
           verticalalignment='bottom')
P.show()
| mit |
niknow/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Build a discrete approximation of a (truncated) normal distribution on an
# integer grid, sample from it, and compare sample frequencies against the
# true probabilities in a grouped bar chart.
npoints = 20  # number of integer support points of the distribution minus 1
npointsh = npoints / 2
# NOTE(review): under Python 3 this is float division (10.0); np.arange
# accepts it, but the intent is the integer half-width -- confirm.
npointsf = float(npoints)
nbound = 4  #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound  #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1)  #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound  #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
# pmf of each integer point = difference of the truncnorm CDF at bin edges
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')
n_sample = 500
np.random.seed(87655678)  #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd=rvs
# histogram of the sample over the same integer bins
f,l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint  # the x locations for the groups
width = 0.35  # the width of the bars
plt.subplot(111)
# side-by-side bars: true probabilities vs. sample frequencies
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
# overlay the continuous normal pdf with the matching std deviation
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
                    color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
mit-crpg/openmc | tests/regression_tests/mgxs_library_condense/test.py | 7 | 2471 | import hashlib
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
    """Test harness that tallies every MGXS/MDGXS type on a 2x2 mesh and
    digests the library after condensing it to a single energy group."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Initialize a two-group structure (thermal cutoff at 0.625 eV)
        energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
        # Initialize MGXS Library for a few cross section types
        self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
        self.mgxs_lib.by_nuclide = False
        # Test all MGXS types, including the delayed (MDGXS) ones
        self.mgxs_lib.mgxs_types = openmc.mgxs.MGXS_TYPES + \
                                   openmc.mgxs.MDGXS_TYPES
        self.mgxs_lib.energy_groups = energy_groups
        self.mgxs_lib.num_delayed_groups = 6
        self.mgxs_lib.legendre_order = 3
        self.mgxs_lib.domain_type = 'mesh'
        # Instantiate a tally mesh covering the model with a 2x2 grid
        mesh = openmc.RegularMesh(mesh_id=1)
        mesh.dimension = [2, 2]
        mesh.lower_left = [-100., -100.]
        mesh.width = [100., 100.]
        self.mgxs_lib.domains = [mesh]
        self.mgxs_lib.build_library()
        # Add tallies (merge=False keeps one tally per MGXS)
        self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
    def _get_results(self, hash_output=False):
        """Digest info in the statepoint and return as a string."""
        # Read the statepoint file.
        sp = openmc.StatePoint(self._sp_name)
        # Load the MGXS library from the statepoint
        self.mgxs_lib.load_from_statepoint(sp)
        # Build a condensed 1-group MGXS Library (this is what the test
        # actually exercises: get_condensed_library)
        one_group = openmc.mgxs.EnergyGroups([0., 20.e6])
        condense_lib = self.mgxs_lib.get_condensed_library(one_group)
        # Build a string from Pandas Dataframe for each 1-group MGXS
        outstr = ''
        for domain in condense_lib.domains:
            for mgxs_type in condense_lib.mgxs_types:
                mgxs = condense_lib.get_mgxs(domain, mgxs_type)
                df = mgxs.get_pandas_dataframe()
                outstr += df.to_string() + '\n'
        # Hash the results if necessary (keeps reference files small)
        if hash_output:
            sha512 = hashlib.sha512()
            sha512.update(outstr.encode('utf-8'))
            outstr = sha512.hexdigest()
        return outstr
def test_mgxs_library_condense():
    """Regression test: build an MGXS library from a PWR pin-cell run and
    condense it to one energy group, comparing against reference results."""
    pin_cell_model = pwr_pin_cell()
    harness = MGXSTestHarness('statepoint.10.h5', pin_cell_model)
    harness.main()
| mit |
f3r/scikit-learn | sklearn/naive_bayes.py | 29 | 28917 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""
    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X
        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_classes, n_samples].
        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """
    def predict(self, X):
        """
        Perform classification on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        # argmax over classes: the normalizing constant log P(x) is the same
        # for every class, so it need not be computed for prediction
        return self.classes_[np.argmax(jll, axis=1)]
    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n)
        # logsumexp keeps the normalization numerically stable in log space
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T
    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        # exponentiate the (already normalized) log-probabilities
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)
    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
    http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.
    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.
    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class
    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB()
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """
    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
            .. versionadded:: 0.17
               Gaussian Naive Bayes supports fitting with *sample_weight*.
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # _refit=True discards state from any previous (partial_)fit call,
        # so repeated fit() calls always start from scratch
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)
    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.
        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).
        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.
        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.
        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.
        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.
        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        # nothing to update from an empty batch
        if X.shape[0] == 0:
            return mu, var
        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)
        # first batch: the new statistics are the totals
        if n_past == 0:
            return new_mu, new_var
        n_total = float(n_past + n_new)
        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total
        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total
        return total_mu, total_var
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
            .. versionadded:: 0.17
        Returns
        -------
        self : object
            Returns self.
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)
    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        _refit: bool
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # If the ratio of data variance between dimensions is too small, it
        # will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the standard
        # deviation of the largest dimension.
        epsilon = 1e-9 * np.var(X, axis=0).max()
        if _refit:
            self.classes_ = None
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))
            self.class_prior_ = np.zeros(n_classes)
            self.class_count_ = np.zeros(n_classes)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Put epsilon back in each time
            # (remove the boost added by the previous call so it is not
            # accumulated across calls; it is re-added below)
            self.sigma_[:, :] -= epsilon
        classes = self.classes_
        unique_y = np.unique(y)
        unique_y_in_classes = in1d(unique_y, classes)
        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (y[~unique_y_in_classes], classes))
        # update each class's running mean/variance from its samples only
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]
            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]
            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)
            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i
        self.sigma_[:, :] += epsilon
        # empirical class priors from the (weighted) class counts
        self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
        return self
    def _joint_log_likelihood(self, X):
        check_is_fitted(self, "classes_")
        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            # log prior + sum over features of the log Gaussian density
            # (features are treated as independent given the class)
            jointi = np.log(self.class_prior_[i])
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)
        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data
    Any estimator based on this class should provide:
    __init__
    _joint_log_likelihood(X) as per BaseNB
    """
    def _update_class_log_prior(self, class_prior=None):
        # Priority: explicit class_prior > empirical prior (fit_prior=True)
        # > uniform prior.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_)
                                     - np.log(self.class_count_.sum()))
        else:
            # uniform prior: log(1 / n_classes) for every class
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        classes : array-like, shape = [n_classes]
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))
        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # binary problem: expand the single indicator column to an
            # explicit two-column (negative, positive) representation
            Y = np.concatenate((1 - Y, Y), axis=1)
        n_samples, n_classes = Y.shape
        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))
        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)
        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # binary problem: expand to explicit two-column representation
            Y = np.concatenate((1 - Y, Y), axis=1)
        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # binary case: expose only the positive class row, mirroring
        # linear-model conventions
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)
    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models
    The multinomial Naive Bayes classifier is suitable for classification with
    discrete features (e.g., word counts for text classification). The
    multinomial distribution normally requires integer feature counts. However,
    in practice, fractional counts such as tf-idf may also work.
    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.
    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.
    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features
        given a class, ``P(x_i|y)``.
    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.
    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]
    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.
    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """
    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        # counts are only meaningful for non-negative features
        if np.any((X.data if issparse(X) else X) < 0):
            raise ValueError("Input X must be non-negative")
        # per-class feature totals and (weighted) class sample totals
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Apply smoothing to raw counts and recompute log probabilities"""
        # Lidstone smoothing: add alpha to every (class, feature) count
        smoothed_fc = self.feature_count_ + self.alpha
        smoothed_cc = smoothed_fc.sum(axis=1)
        self.feature_log_prob_ = (np.log(smoothed_fc)
                                  - np.log(smoothed_cc.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        # log P(c) + sum_i x_i * log P(f_i|c), expressed as a dot product
        return (safe_sparse_dot(X, self.feature_log_prob_.T)
                + self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.
    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.
    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,]
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]
    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """
    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        # map features to booleans before counting, unless already binary
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Apply smoothing to raw counts and recompute log probabilities"""
        # denominator adds alpha for both outcomes (present/absent)
        smoothed_fc = self.feature_count_ + self.alpha
        smoothed_cc = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_fc)
                                  - np.log(smoothed_cc.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        # the same binarization applied at fit time must be applied here
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))
        # log P(x_i=0|y): Bernoulli features also penalize absent features
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        return jll
| bsd-3-clause |
rudhir-upretee/Sumo17_With_Netsim | tools/projects/TaxiFCD_Krieg/src/taxiQuantity/QuantityOverDay.py | 1 | 2316 | # -*- coding: Latin-1 -*-
"""
@file QuantityOverDay.py
@author Sascha Krieg
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-04-01
Counts for an given interval all unique taxis in an FCD file and draws the result as a bar chart.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from pylab import *
import datetime
from matplotlib.dates import MinuteLocator, HourLocator, DateFormatter
import util.Path as path
#global vars
intervalDelta=datetime.timedelta(minutes=60)
intervalDate=datetime.datetime( 2007, 7, 18,0,0 )
format="%Y-%m-%d %H:%M:%S"
barList={}
def main():
    """Count taxis per interval and render the result as a bar chart."""
    print "start program"
    countTaxis()

    #a figure (chart) where we add the bar's and change the axis properties
    fig = figure()
    ax = fig.add_subplot(111)

    #set the width of the bar to interval-size
    # NOTE(review): intervalDate has already been advanced by countTaxis(),
    # but the width only depends on the delta, so this is harmless.
    barWidth=date2num(intervalDate+intervalDelta)-date2num(intervalDate)
    #add a bar with specified values and width
    # NOTE(review): relies on keys() and values() iterating in matching
    # order over the same dict (guaranteed by Python).
    ax.bar(date2num(barList.keys()),barList.values(),width=barWidth)

    #set the x-Axis to show the hours
    ax.xaxis.set_major_locator(HourLocator())
    ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
    ax.xaxis.set_minor_locator(MinuteLocator())
    ax.grid(True)
    xlabel('Zeit (s)')
    ylabel('Quantit'+u'\u00E4'+'t')
    title('Menge der Taxis im VLS-Gebiet')
    ax.autoscale_view()
    #shows the text of the x-axis in a way that it looks nice
    fig.autofmt_xdate()
    #display the chart
    show()
def countTaxis():
    """Analyzes the FCD and generates the barList used for the bar chart.

    Fixes over the previous version:
      * the record that triggered an interval rollover was silently dropped,
      * records jumping over several (empty) intervals were mis-binned,
      * the last (partial) interval was never flushed into barList,
      * the input file was never closed.
    """
    global barList
    global intervalDate
    taxis = set()

    inputFile = open(path.vls, 'r')
    try:
        for line in inputFile:
            words = line.split("\t")
            timestamp = datetime.datetime.strptime(words[0], format)
            # Close every finished interval (including empty ones) before
            # binning the current record into its own interval.
            while intervalDate + intervalDelta <= timestamp:
                barList[intervalDate] = len(taxis)
                intervalDate += intervalDelta
                taxis.clear()
            taxis.add(words[4])
        # Flush the trailing partial interval.
        if taxis:
            barList[intervalDate] = len(taxis)
    finally:
        inputFile.close()
#start the program
main() | gpl-3.0 |
Aghosh993/QuadcopterCodebase | GroundSoftware/csv_display.py | 1 | 1948 | #!/usr/bin/python3
import matplotlib as mp
import numpy as np
import matplotlib.pyplot as plt
import argparse
message_set = "sf11_bno055 v_z ahrs_rp yaw_height flow bno055_att esc_cmds"
def plot_file(file):
    """Load one CSV file (first column = x axis) and show one subplot
    per remaining column, all in a single row."""
    fig = plt.figure()
    input_data = np.loadtxt(file, delimiter=', ')
    col = input_data.shape[1]
    xdata = input_data[:,0]
    # e.g. 3 data columns -> mask 131 -> subplots 131, 132, 133
    subplot_mask = 100+(col-1)*10
    ydata = []
    plots = []
    for i in range(col-1):
        ydata.append(input_data[:,i+1])
        plots.append(fig.add_subplot(subplot_mask+i+1))
        plots[i].plot(xdata,ydata[i])
    plt.show()
def plot_fileset(files):
    """Plot every loadable CSV in *files*, one figure per file.

    Files whose data has two values or fewer are skipped. Within each
    figure the first column is the x axis and every other column gets
    its own subplot in a single row.
    """
    figures = []
    for fname in files:
        data = np.loadtxt(fname, delimiter=', ')
        if data.size <= 2:
            continue
        fig = plt.figure()
        fig.suptitle(fname)
        ncol = data.shape[1]
        t = data[:, 0]
        base = 100 + (ncol - 1) * 10
        for i in range(1, ncol):
            ax = fig.add_subplot(base + i)
            ax.plot(t, data[:, i])
        figures.append(fig)
    plt.show()
def main():
    """Entry point: plot one CSV file (-f), or a whole timestamped
    experimental fileset (-t, optionally -d for the log directory).

    Fix: the previous version crashed with a NameError when neither
    -f nor -t was supplied, and printed the status message without
    the timestamp.
    """
    parser = argparse.ArgumentParser(description='Display a CSV file')
    parser.add_argument('-t', '--timestamp', metavar='timestamp', nargs=1, help='UTC timestamp of experimental fileset to plot')
    parser.add_argument('-d', '--log_dir', metavar='log_dir', nargs=1, help='Path to directory containing experimental fileset')
    parser.add_argument('-f', '--filename', metavar='filename', nargs=1, help='CSV file to open')
    args = parser.parse_args()

    if args.filename:
        plot_file(args.filename[0])
        return

    # Without -f we need a timestamp to locate the fileset.
    if not args.timestamp:
        parser.error('either --filename or --timestamp must be given')

    ts = args.timestamp[0]
    print("Attempting to plot full experimental run from time " + ts)
    logdir = args.log_dir[0] if args.log_dir else "../logs"
    file_set = [logdir + "/" + msg + "_" + ts + ".csv"
                for msg in message_set.split()]
    plot_fileset(file_set)
if __name__ == '__main__':
main() | gpl-3.0 |
bzero/arctic | tests/util.py | 2 | 1376 | from contextlib import contextmanager
from cStringIO import StringIO
from dateutil.rrule import rrule, DAILY
import dateutil
from datetime import datetime as dt
import pandas
import numpy as np
import sys
def read_str_as_pandas(ts_str):
    """Parse a '|'-separated text table into a DataFrame.

    The first column becomes the (date-parsed) index; column and index
    names are taken from the header row with whitespace stripped.
    """
    header = ts_str.split('\n')[0]
    labels = [col.strip() for col in header.split('|')]
    frame = pandas.read_csv(StringIO(ts_str), sep='|', index_col=0,
                            date_parser=dateutil.parser.parse)
    # Trim the whitespace on the column names
    frame.columns = labels[1:]
    frame.index.name = labels[0]
    return frame
def get_large_ts(size=2500):
    """Return a size x size DataFrame of random samples indexed by
    consecutive daily timestamps starting 1970-01-01."""
    index = list(rrule(DAILY, count=size, dtstart=dt(1970, 1, 1), interval=1))
    columns = {'n' + str(i): np.random.random_sample(size) for i in range(size)}
    frame = pandas.DataFrame(index=index, data=columns)
    frame.index.name = 'index'
    return frame
@contextmanager
def _save_argv():
args = sys.argv[:]
yield
sys.argv = args
def run_as_main(fn, *args):
    """Invoke *fn* as if it were the system entry point.

    Temporarily replaces ``sys.argv`` with ``['progname'] + args``
    (restored afterwards by ``_save_argv``) and returns whatever
    *fn* returns. Useful for testing console scripts::

        from scripts.Foo import main
        run_as_main(main, 'foo', 'bar')

    is equivalent to ``Foo foo bar`` when ``scripts.Foo.main`` is
    registered as an entry point.
    """
    with _save_argv():
        print("run_as_main: " + str(args))
        sys.argv = ['progname']
        sys.argv.extend(list(args))
        return fn()
| lgpl-2.1 |
rmeertens/paparazzi | sw/misc/attitude_reference/att_ref_gui.py | 49 | 12483 | #!/usr/bin/env python
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
"""
This is a graphical user interface for playing with reference attitude
"""
# https://gist.github.com/zed/b966b5a04f2dfc16c98e
# https://gist.github.com/nzjrs/51686
# http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
# http://chimera.labs.oreilly.com/books/1230000000393/ch12.html#_problem_208 <- threads
# TODO:
# -cancel workers
#
#
#
from __future__ import print_function
from gi.repository import Gtk, GObject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
import matplotlib.font_manager as fm
import math, threading, numpy as np, scipy.signal, pdb, copy, logging
import pat.utils as pu
import pat.algebra as pa
import control as ctl
import gui
class Reference(gui.Worker):
    """Background worker integrating one attitude-reference implementation
    over a Setpoint, recording quaternion/euler/rate/accel trajectories."""

    def __init__(self, sp, ref_impl=ctl.att_ref_default, omega=6., xi=0.8, max_vel=pu.rad_of_deg(100),
                 max_accel=pu.rad_of_deg(500)):
        gui.Worker.__init__(self)
        self.impl = ref_impl()
        self.sp = sp
        self.reset_outputs(sp)
        self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
        # Checked by Application.recompute_sequentially() to know whether
        # this reference still needs a (re)computation pass.
        self.do_work = True

    def reset_outputs(self, sp):
        # One row per sample of the setpoint time vector.
        self.euler = np.zeros((len(sp.time), pa.e_size))
        self.quat = np.zeros((len(sp.time), pa.q_size))
        self.vel = np.zeros((len(sp.time), pa.r_size))
        self.accel = np.zeros((len(sp.time), pa.r_size))

    def update_type(self, _type):
        """Swap in a new reference implementation class and mark dirty."""
        #print('update_type', _type)
        self.impl = _type()
        self.do_work = True
        #self.recompute()

    def update_param(self, p, v):
        """Forward a single parameter change to the implementation."""
        #print('update_param', p, v)
        self.impl.set_param(p, v)
        self.do_work = True
        #self.recompute()

    def update_sp(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
        """Re-bind to a (possibly resized) setpoint and mark dirty."""
        self.reset_outputs(sp)
        self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
        self.do_work = True
        #self.recompute()

    def update(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
        # None arguments mean "keep the current value".
        self.sp = sp
        if ref_impl is not None:
            self.impl = ref_impl()
        if omega is not None:
            self.impl.set_param('omega', omega)
        if xi is not None:
            self.impl.set_param('xi', xi)
        if max_vel is not None:
            self.impl.set_param('max_vel', max_vel)
        if max_accel is not None:
            self.impl.set_param('max_accel', max_accel)

    def recompute(self):
        """Start the worker thread over the current setpoint."""
        #print("recomputing...")
        self.start((self.sp,))

    def _work_init(self, sp):
        """Worker hook: reset output arrays and the integrator state."""
        #print('_work_init ', self, self.impl, sp, sp.dt)
        self.euler = np.zeros((len(sp.time), pa.e_size))
        self.quat = np.zeros((len(sp.time), pa.q_size))
        self.vel = np.zeros((len(sp.time), pa.r_size))
        self.accel = np.zeros((len(sp.time), pa.r_size))
        # Arbitrary non-zero initial attitude so the transient is visible.
        euler0 = [0.3, 0.1, 0.2]
        self.impl.set_euler(np.array(euler0))
        self.quat[0], self.euler[0], self.vel[0], self.accel[0] = self.impl.quat, self.impl.euler, self.impl.vel, self.impl.accel
        # Samples integrated per progress step reported to the GUI.
        self.n_iter_per_step = float(len(sp.time)) / self.n_step

    def _work_step(self, i, sp):
        """Worker hook: integrate the samples belonging to progress step i."""
        start, stop = int(i * self.n_iter_per_step), int((i + 1) * self.n_iter_per_step)
        # print('_work_step of %s: i %i, start %i, stop %i' % (self.impl, i, start, stop))
        for j in range(start, stop):
            self.impl.update_quat(sp.quat[j], sp.dt)
            self.quat[j], self.vel[j], self.accel[j] = self.impl.quat, self.impl.vel, self.impl.accel
            self.euler[j] = pa.euler_of_quat(self.quat[j])
class Setpoint(object):
    """Time series of attitude setpoints (euler angles + quaternions)."""

    # Setpoint type identifiers and their display names.
    t_static, t_step_phi, t_step_theta, t_step_psi, t_step_random, t_nb = range(0, 6)
    t_names = ["constant", "step phi", "step theta", "step psi", "step_random"]

    def __init__(self, type=t_static, duration=10., step_duration=5., step_ampl=pu.rad_of_deg(10.)):
        # 512 Hz sample rate.
        self.dt = 1. / 512
        self.update(type, duration, step_duration, step_ampl)

    def update(self, type, duration, step_duration, step_ampl):
        """Rebuild the euler/quaternion time series for the given shape."""
        self.type = type
        self.duration, self.step_duration, self.step_ampl = duration, step_duration, step_ampl
        self.time = np.arange(0., self.duration, self.dt)
        self.euler = np.zeros((len(self.time), pa.e_size))
        try:
            # Square wave on the axis selected by the setpoint type;
            # t_static / t_step_random raise ValueError from index() and
            # fall through, leaving a zero setpoint.
            i = [Setpoint.t_step_phi, Setpoint.t_step_theta, Setpoint.t_step_psi].index(self.type)
            self.euler[:, i] = step_ampl / 2 * scipy.signal.square(math.pi / step_duration * self.time)
        except Exception as e:
            print(e)
            pass
        self.quat = np.zeros((len(self.time), pa.q_size))
        for i in range(0, len(self.time)):
            self.quat[i] = pa.quat_of_euler(self.euler[i])
class GUI(object):
    """Main window built from Glade XML: a plot area on top plus one
    parameter view per reference below."""

    def __init__(self, sp, refs):
        self.b = Gtk.Builder()
        self.b.add_from_file("ressources/att_ref_gui.xml")
        w = self.b.get_object("window")
        w.connect("delete-event", Gtk.main_quit)
        mb = self.b.get_object("main_vbox")
        self.plot = Plot(sp, refs)
        mb.pack_start(self.plot, True, True, 0)
        mb = self.b.get_object("main_hbox")
        # Implementations selectable from each reference's combo box.
        ref_classes = [ctl.att_ref_default, ctl.att_ref_sat_naive, ctl.att_ref_sat_nested, ctl.att_ref_sat_nested2,
                       ctl.AttRefFloatNative, ctl.AttRefIntNative]
        self.ref_views = [gui.AttRefParamView('<b>Ref {}</b>'.format(i+1), ref_classes=ref_classes,
                                              active_impl=r.impl) for i, r in enumerate(refs)]
        for r in self.ref_views:
            mb.pack_start(r, True, True, 0)
        w.show_all()
class Plot(Gtk.Frame):
    """Gtk frame embedding a 3x3 matplotlib grid:
    rows = angle / rate / acceleration, columns = phi / theta / psi."""

    def __init__(self, sp, refs):
        Gtk.Frame.__init__(self)
        self.f = Figure()
        self.canvas = FigureCanvas(self.f)
        self.add(self.canvas)
        self.set_size_request(1024, 600)
        self.f.subplots_adjust(left=0.07, right=0.98, bottom=0.05, top=0.95,
                               hspace=0.2, wspace=0.2)
        # self.buffer = self.canvas.get_snapshot()

    def decorate(self, axis, title=None, ylab=None, legend=None):
        """Apply title/label/legend and a light grid to one axis."""
        # font_prop = fm.FontProperties(fname='Humor-Sans-1.0.ttf', size=14)
        if title is not None:
            axis.set_title(title)  # , fontproperties=font_prop)
        if ylab is not None:
            axis.yaxis.set_label_text(ylab)  # , fontproperties=font_prop)
        if legend is not None:
            axis.legend(legend)  # , prop=font_prop)
        axis.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
        axis.yaxis.grid(color='k', linestyle='-', linewidth=0.2)

    def update(self, sp, refs):
        """Redraw all nine subplots from the setpoint and reference data."""
        # Row 1: euler angles of each reference plus the setpoint itself.
        title = [r'$\phi$', r'$\theta$', r'$\psi$']
        legend = ['Ref1', 'Ref2', 'Setpoint']
        for i in range(0, 3):
            axis = self.f.add_subplot(331 + i)
            axis.clear()
            for ref in refs:
                axis.plot(sp.time, pu.deg_of_rad(ref.euler[:, i]))
            axis.plot(sp.time, pu.deg_of_rad(sp.euler[:, i]))
            # Only the first column carries the y label and the legend.
            self.decorate(axis, title[i], *(('deg', legend) if i == 0 else (None, None)))
        # Row 2: body rates.
        title = [r'$p$', r'$q$', r'$r$']
        for i in range(0, 3):
            axis = self.f.add_subplot(334 + i)
            axis.clear()
            for ref in refs:
                axis.plot(sp.time, pu.deg_of_rad(ref.vel[:, i]))
            self.decorate(axis, title[i], 'deg/s' if i == 0 else None)
        # Row 3: angular accelerations.
        title = [r'$\dot{p}$', r'$\dot{q}$', r'$\dot{r}$']
        for i in range(0, 3):
            axis = self.f.add_subplot(337 + i)
            axis.clear()
            for ref in refs:
                axis.plot(sp.time, pu.deg_of_rad(ref.accel[:, i]))
            self.decorate(axis, title[i], 'deg/s2' if i == 0 else None)
        self.canvas.draw()
class Application(object):
    """Wires the setpoint, the two reference workers and the GUI together."""

    def __init__(self):
        self.sp = Setpoint()
        self.refs = [Reference(self.sp), Reference(self.sp, ref_impl=ctl.AttRefFloatNative)]
        for nref, r in enumerate(self.refs):
            # nref+1 is passed through to the callbacks to select the view.
            r.connect("progress", self.on_ref_update_progress, nref + 1)
            r.connect("completed", self.on_ref_update_completed, nref + 1)
        self.gui = GUI(self.sp, self.refs)
        self.register_gui()
        self.recompute_sequentially()

    def on_ref_update_progress(self, ref, v, nref):
        """Worker progress callback: update the matching progress bar."""
        #print('progress', nref, v)
        self.gui.ref_views[nref - 1].progress.set_fraction(v)

    def on_ref_update_completed(self, ref, nref):
        """Worker completion callback: kick the next pending worker, redraw."""
        #print('on_ref_update_completed', ref, nref)
        self.gui.ref_views[nref - 1].progress.set_fraction(1.0)
        # recompute remaining refs (if any)
        self.recompute_sequentially()
        self.gui.plot.update(self.sp, self.refs)

    def register_gui(self):
        """Hook up setpoint widgets and both reference parameter views."""
        self.register_setpoint()
        for i in range(0, 2):
            self.gui.ref_views[i].connect(self._on_ref_changed, self._on_ref_param_changed, self.refs[i], self.gui.ref_views[i])
            self.gui.ref_views[i].update_view(self.refs[i].impl)

    def register_setpoint(self):
        """Populate and connect the setpoint type combo and spin buttons."""
        b = self.gui.b
        c_sp_type = b.get_object("combo_sp_type")
        for n in Setpoint.t_names:
            c_sp_type.append_text(n)
        c_sp_type.set_active(self.sp.type)
        c_sp_type.connect("changed", self.on_sp_changed)
        names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
        widgets = [b.get_object(name) for name in names]
        adjs = [Gtk.Adjustment(self.sp.duration, 1, 100, 1, 10, 0),
                Gtk.Adjustment(self.sp.step_duration, 0.1, 10., 0.1, 1., 0),
                Gtk.Adjustment(pu.deg_of_rad(self.sp.step_ampl), 0.1, 180., 1, 10., 0)]
        for i, w in enumerate(widgets):
            w.set_adjustment(adjs[i])
            w.update()
            w.connect("value-changed", self.on_sp_changed)

    def recompute_sequentially(self):
        """
        Somehow running two threads to update both references at the same time produces bogus data..
        As a workaround we simply run them one after the other.
        """
        # Nothing to start while any worker is still running.
        for r in self.refs:
            if r.running:
                return
        # Start only the first pending worker; the completion callback
        # will call us again for the next one.
        for r in self.refs:
            if r.do_work:
                r.recompute()
                return

    def on_sp_changed(self, widget):
        """Setpoint widget callback: rebuild the setpoint and recompute."""
        b = self.gui.b
        _type = b.get_object("combo_sp_type").get_active()
        names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
        _duration, _step_duration, _step_amplitude = [b.get_object(name).get_value() for name in names]
        #print('_on_sp_changed', _type, _duration, _step_duration, _step_amplitude)
        _step_amplitude = pu.rad_of_deg(_step_amplitude)
        self.sp.update(_type, _duration, _step_duration, _step_amplitude)
        # somehow running two threads to update both references at the same time produces bogus data..
        # as a workaround we simply run them one after the other
        for r in self.refs:
            r.update_sp(self.sp)
            #r.recompute()
        self.recompute_sequentially()

    def _on_ref_changed(self, widget, ref, view):
        """Reference-type combo callback."""
        #print('_on_ref_changed', widget, ref, view)
        ref.update_type(view.get_selected_ref_class())
        view.update_ref_params(ref.impl)
        self.recompute_sequentially()

    def _on_ref_param_changed(self, widget, p, ref, view):
        """Reference parameter spin-button callback; converts the displayed
        value back to internal units via the view's 'd2r' converter."""
        #print("_on_ref_param_changed: %s %s=%s" % (ref.impl.name, p, val))
        val = view.spin_cfg[p]['d2r'](widget.get_value())
        ref.update_param(p, val)
        self.recompute_sequentially()

    def run(self):
        """Enter the Gtk main loop (blocks until the window is closed)."""
        Gtk.main()
# Script entry point: configure logging and hand control to Gtk.
if __name__ == "__main__":
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
    Application().run()
| gpl-2.0 |
marcoitur/Freecad_test | src/Mod/Plot/plotSeries/TaskPanel.py | 26 | 17784 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Plot
from plotUtils import Paths
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
    """FreeCAD task panel for editing the data series of the active Plot:
    label, line style, marker, width, size, color, and removal.

    ``self.skip`` guards against re-entrant Qt signal handling while the
    panel itself updates widgets; ``self.item`` is the index of the
    currently selected serie; ``self.plt`` caches the active plot so the
    list is only rebuilt when it changes.
    """

    def __init__(self):
        self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
        self.skip = False
        self.item = 0
        self.plt = None

    def accept(self):
        # Task panel protocol: OK pressed, nothing to commit.
        return True

    def reject(self):
        # Task panel protocol: Cancel pressed, nothing to roll back.
        return True

    def clicked(self, index):
        pass

    def open(self):
        pass

    def needsFullSpace(self):
        return True

    def isAllowedAlterSelection(self):
        return False

    def isAllowedAlterView(self):
        return True

    def isAllowedAlterDocument(self):
        return False

    def helpRequested(self):
        pass

    def setupUi(self):
        """Locate the widgets, populate them and connect the signals.

        Returns False on success (task-panel convention: a truthy return
        makes createTask() close the dialog).
        """
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        form.items = self.widget(QtGui.QListWidget, "items")
        form.label = self.widget(QtGui.QLineEdit, "label")
        form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
        form.style = self.widget(QtGui.QComboBox, "lineStyle")
        form.marker = self.widget(QtGui.QComboBox, "markers")
        form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
        form.size = self.widget(QtGui.QSpinBox, "markerSize")
        form.color = self.widget(QtGui.QPushButton, "color")
        form.remove = self.widget(QtGui.QPushButton, "remove")
        self.form = form
        self.retranslateUi()
        self.fillStyles()
        self.updateUI()
        QtCore.QObject.connect(
            form.items,
            QtCore.SIGNAL("currentRowChanged(int)"),
            self.onItem)
        QtCore.QObject.connect(
            form.label,
            QtCore.SIGNAL("editingFinished()"),
            self.onData)
        QtCore.QObject.connect(
            form.isLabel,
            QtCore.SIGNAL("stateChanged(int)"),
            self.onData)
        QtCore.QObject.connect(
            form.style,
            QtCore.SIGNAL("currentIndexChanged(int)"),
            self.onData)
        QtCore.QObject.connect(
            form.marker,
            QtCore.SIGNAL("currentIndexChanged(int)"),
            self.onData)
        QtCore.QObject.connect(
            form.width,
            QtCore.SIGNAL("valueChanged(double)"),
            self.onData)
        QtCore.QObject.connect(
            form.size,
            QtCore.SIGNAL("valueChanged(int)"),
            self.onData)
        QtCore.QObject.connect(
            form.color,
            QtCore.SIGNAL("pressed()"),
            self.onColor)
        QtCore.QObject.connect(
            form.remove,
            QtCore.SIGNAL("pressed()"),
            self.onRemove)
        QtCore.QObject.connect(
            Plot.getMdiArea(),
            QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),
            self.onMdiArea)
        return False

    def getMainWindow(self):
        """Return the FreeCAD main window widget."""
        toplevel = QtGui.qApp.topLevelWidgets()
        for i in toplevel:
            if i.metaObject().className() == "Gui::MainWindow":
                return i
        raise RuntimeError("No main window found")

    def widget(self, class_id, name):
        """Return the selected widget.

        Keyword arguments:
        class_id -- Class identifier
        name -- Name of the widget
        """
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        return form.findChild(class_id, name)

    def retranslateUi(self):
        """Set the user interface locale strings."""
        self.form.setWindowTitle(QtGui.QApplication.translate(
            "plot_series",
            "Configure series",
            None,
            QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QCheckBox, "isLabel").setText(
            QtGui.QApplication.translate(
                "plot_series",
                "No label",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QPushButton, "remove").setText(
            QtGui.QApplication.translate(
                "plot_series",
                "Remove serie",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QLabel, "styleLabel").setText(
            QtGui.QApplication.translate(
                "plot_series",
                "Line style",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QLabel, "markerLabel").setText(
            QtGui.QApplication.translate(
                "plot_series",
                "Marker",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QListWidget, "items").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "List of available series",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QLineEdit, "label").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Line title",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QCheckBox, "isLabel").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "If checked serie will not be considered for legend",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QComboBox, "lineStyle").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Line style",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QComboBox, "markers").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Marker style",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QDoubleSpinBox, "lineWidth").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Line width",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QSpinBox, "markerSize").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Marker size",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QPushButton, "color").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Line and marker color",
                None,
                QtGui.QApplication.UnicodeUTF8))
        self.widget(QtGui.QPushButton, "remove").setToolTip(
            QtGui.QApplication.translate(
                "plot_series",
                "Removes this serie",
                None,
                QtGui.QApplication.UnicodeUTF8))

    def fillStyles(self):
        """Fill the style combo boxes with the availabel ones."""
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        form.style = self.widget(QtGui.QComboBox, "lineStyle")
        form.marker = self.widget(QtGui.QComboBox, "markers")
        # Line styles
        # NOTE(review): indexing .keys() assumes a Python 2 list; under
        # Python 3 a dict view is not indexable -- verify target runtime.
        linestyles = Line2D.lineStyles.keys()
        for i in range(0, len(linestyles)):
            style = linestyles[i]
            string = "\'" + str(style) + "\'"
            string += " (" + Line2D.lineStyles[style] + ")"
            form.style.addItem(string)
        # Markers
        markers = Line2D.markers.keys()
        for i in range(0, len(markers)):
            marker = markers[i]
            string = "\'" + str(marker) + "\'"
            string += " (" + Line2D.markers[marker] + ")"
            form.marker.addItem(string)

    def onItem(self, row):
        """Executed when the selected item is modified."""
        if not self.skip:
            self.skip = True
            self.item = row
            self.updateUI()
            self.skip = False

    def onData(self):
        """Executed when the selected item data is modified."""
        if not self.skip:
            self.skip = True
            plt = Plot.getPlot()
            if not plt:
                # NOTE(review): self.skip stays True on this early return --
                # verify whether that is intentional.
                self.updateUI()
                return
            mw = self.getMainWindow()
            form = mw.findChild(QtGui.QWidget, "TaskPanel")
            form.label = self.widget(QtGui.QLineEdit, "label")
            form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
            form.style = self.widget(QtGui.QComboBox, "lineStyle")
            form.marker = self.widget(QtGui.QComboBox, "markers")
            form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
            form.size = self.widget(QtGui.QSpinBox, "markerSize")
            # Ensure that selected serie exist
            if self.item >= len(Plot.series()):
                self.updateUI()
                return
            # Set label
            serie = Plot.series()[self.item]
            if(form.isLabel.isChecked()):
                serie.name = None
                form.label.setEnabled(False)
            else:
                serie.name = form.label.text()
                form.label.setEnabled(True)
            # Set line style and marker
            style = form.style.currentIndex()
            linestyles = Line2D.lineStyles.keys()
            serie.line.set_linestyle(linestyles[style])
            marker = form.marker.currentIndex()
            markers = Line2D.markers.keys()
            serie.line.set_marker(markers[marker])
            # Set line width and marker size
            serie.line.set_linewidth(form.width.value())
            serie.line.set_markersize(form.size.value())
            plt.update()
            # Regenerate series labels
            self.setList()
            self.skip = False

    def onColor(self):
        """Executed when the color palette is requested."""
        plt = Plot.getPlot()
        if not plt:
            self.updateUI()
            return
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        form.color = self.widget(QtGui.QPushButton, "color")
        # Ensure that selected serie exist
        if self.item >= len(Plot.series()):
            self.updateUI()
            return
        # Show widget to select color
        col = QtGui.QColorDialog.getColor()
        # Send color to widget and serie
        if col.isValid():
            serie = plt.series[self.item]
            form.color.setStyleSheet(
                "background-color: rgb({}, {}, {});".format(col.red(),
                                                            col.green(),
                                                            col.blue()))
            serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
            plt.update()

    def onRemove(self):
        """Executed when the data serie must be removed."""
        plt = Plot.getPlot()
        if not plt:
            self.updateUI()
            return
        # Ensure that selected serie exist
        if self.item >= len(Plot.series()):
            self.updateUI()
            return
        # Remove serie
        Plot.removeSerie(self.item)
        self.setList()
        self.updateUI()
        plt.update()

    def onMdiArea(self, subWin):
        """Executed when a new window is selected on the mdi area.

        Keyword arguments:
        subWin -- Selected window.
        """
        plt = Plot.getPlot()
        if plt != subWin:
            self.updateUI()

    def updateUI(self):
        """ Setup UI controls values if possible """
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        form.items = self.widget(QtGui.QListWidget, "items")
        form.label = self.widget(QtGui.QLineEdit, "label")
        form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
        form.style = self.widget(QtGui.QComboBox, "lineStyle")
        form.marker = self.widget(QtGui.QComboBox, "markers")
        form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
        form.size = self.widget(QtGui.QSpinBox, "markerSize")
        form.color = self.widget(QtGui.QPushButton, "color")
        form.remove = self.widget(QtGui.QPushButton, "remove")
        plt = Plot.getPlot()
        # Everything is disabled while there is no active plot.
        form.items.setEnabled(bool(plt))
        form.label.setEnabled(bool(plt))
        form.isLabel.setEnabled(bool(plt))
        form.style.setEnabled(bool(plt))
        form.marker.setEnabled(bool(plt))
        form.width.setEnabled(bool(plt))
        form.size.setEnabled(bool(plt))
        form.color.setEnabled(bool(plt))
        form.remove.setEnabled(bool(plt))
        if not plt:
            self.plt = plt
            form.items.clear()
            return
        self.skip = True
        # Refill list
        if self.plt != plt or len(Plot.series()) != form.items.count():
            self.plt = plt
            self.setList()
        # Ensure that have series
        if not len(Plot.series()):
            form.label.setEnabled(False)
            form.isLabel.setEnabled(False)
            form.style.setEnabled(False)
            form.marker.setEnabled(False)
            form.width.setEnabled(False)
            form.size.setEnabled(False)
            form.color.setEnabled(False)
            form.remove.setEnabled(False)
            return
        # Set label
        serie = Plot.series()[self.item]
        if serie.name is None:
            form.isLabel.setChecked(True)
            form.label.setEnabled(False)
            form.label.setText("")
        else:
            form.isLabel.setChecked(False)
            form.label.setText(serie.name)
        # Set line style and marker
        form.style.setCurrentIndex(0)
        linestyles = Line2D.lineStyles.keys()
        for i in range(0, len(linestyles)):
            style = linestyles[i]
            if style == serie.line.get_linestyle():
                form.style.setCurrentIndex(i)
        form.marker.setCurrentIndex(0)
        markers = Line2D.markers.keys()
        for i in range(0, len(markers)):
            marker = markers[i]
            if marker == serie.line.get_marker():
                form.marker.setCurrentIndex(i)
        # Set line width and marker size
        form.width.setValue(serie.line.get_linewidth())
        form.size.setValue(serie.line.get_markersize())
        # Set color
        color = Colors.colorConverter.to_rgb(serie.line.get_color())
        form.color.setStyleSheet("background-color: rgb({}, {}, {});".format(
            int(color[0] * 255),
            int(color[1] * 255),
            int(color[2] * 255)))
        self.skip = False

    def setList(self):
        """Setup the UI control values if it is possible."""
        mw = self.getMainWindow()
        form = mw.findChild(QtGui.QWidget, "TaskPanel")
        form.items = self.widget(QtGui.QListWidget, "items")
        form.items.clear()
        series = Plot.series()
        for i in range(0, len(series)):
            serie = series[i]
            string = 'serie ' + str(i) + ': '
            if serie.name is None:
                string = string + '\"No label\"'
            else:
                string = string + serie.name
            form.items.addItem(string)
        # Ensure that selected item is correct
        if len(series) and self.item >= len(series):
            self.item = len(series) - 1
            form.items.setCurrentIndex(self.item)
def createTask():
    """Create and show the series task panel.

    Returns the panel, or None if setupUi() reported a problem (in
    which case the dialog is closed again).
    """
    panel = TaskPanel()
    Gui.Control.showDialog(panel)
    if panel.setupUi():
        Gui.Control.closeDialog(panel)
        return None
    return panel
| lgpl-2.1 |
grlee77/scipy | scipy/signal/ltisys.py | 12 | 128865 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant:
    """Common machinery shared by the continuous (`lti`) and discrete
    (`dlti`) system hierarchies; never instantiated directly."""

    def __new__(cls, *system, **kwargs):
        """Create a new object, don't allow direct instances."""
        if cls is LinearTimeInvariant:
            raise NotImplementedError('The LinearTimeInvariant class is not '
                                      'meant to be used directly, use `lti` '
                                      'or `dlti` instead.')
        return super(LinearTimeInvariant, cls).__new__(cls)

    def __init__(self):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        super().__init__()

        self.inputs = None
        self.outputs = None
        self._dt = None

    @property
    def dt(self):
        """Return the sampling time of the system, `None` for `lti` systems."""
        return self._dt

    @property
    def _dt_dict(self):
        # Keyword form of `dt`, convenient for forwarding to constructors.
        if self.dt is None:
            return {}
        else:
            return {'dt': self.dt}

    @property
    def zeros(self):
        """Zeros of the system."""
        return self.to_zpk().zeros

    @property
    def poles(self):
        """Poles of the system."""
        return self.to_zpk().poles

    def _as_ss(self):
        """Convert to `StateSpace` system, without copying.

        Returns
        -------
        sys: StateSpace
            The `StateSpace` system. If the class is already an instance of
            `StateSpace` then this instance is returned.
        """
        if isinstance(self, StateSpace):
            return self
        else:
            return self.to_ss()

    def _as_zpk(self):
        """Convert to `ZerosPolesGain` system, without copying.

        Returns
        -------
        sys: ZerosPolesGain
            The `ZerosPolesGain` system. If the class is already an instance of
            `ZerosPolesGain` then this instance is returned.
        """
        if isinstance(self, ZerosPolesGain):
            return self
        else:
            return self.to_zpk()

    def _as_tf(self):
        """Convert to `TransferFunction` system, without copying.

        Returns
        -------
        sys: ZerosPolesGain
            The `TransferFunction` system. If the class is already an instance of
            `TransferFunction` then this instance is returned.
        """
        if isinstance(self, TransferFunction):
            return self
        else:
            return self.to_tf()
class lti(LinearTimeInvariant):
    r"""
    Continuous-time linear time invariant system base class.

    Parameters
    ----------
    *system : arguments
        The `lti` class can be instantiated with either 2, 3 or 4 arguments.
        The following gives the number of arguments and the corresponding
        continuous-time subclass that is created:

            * 2: `TransferFunction`:  (numerator, denominator)
            * 3: `ZerosPolesGain`: (zeros, poles, gain)
            * 4: `StateSpace`:  (A, B, C, D)

        Each argument can be an array or a sequence.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, dlti

    Notes
    -----
    `lti` instances do not exist directly. Instead, `lti` creates an instance
    of one of its subclasses: `StateSpace`, `TransferFunction` or
    `ZerosPolesGain`.

    If (numerator, denominator) is passed in for ``*system``, coefficients for
    both the numerator and denominator should be specified in descending
    exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
    5]``).

    Changing the value of properties that are not directly part of the current
    system representation (such as the `zeros` of a `StateSpace` system) is
    very inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    Examples
    --------
    >>> from scipy import signal

    >>> signal.lti(1, 2, 3, 4)
    StateSpaceContinuous(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: None
    )

    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> signal.lti([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`:

    >>> signal.lti([3, 4], [1, 2])
    TransferFunctionContinuous(
    array([3., 4.]),
    array([1., 2.]),
    dt: None
    )

    """

    def __new__(cls, *system):
        """Create an instance of the appropriate subclass."""
        if cls is lti:
            # Dispatch purely on argument count: 2 -> transfer function,
            # 3 -> zeros/poles/gain, 4 -> state space.
            N = len(system)
            if N == 2:
                return TransferFunctionContinuous.__new__(
                    TransferFunctionContinuous, *system)
            elif N == 3:
                return ZerosPolesGainContinuous.__new__(
                    ZerosPolesGainContinuous, *system)
            elif N == 4:
                return StateSpaceContinuous.__new__(StateSpaceContinuous,
                                                    *system)
            else:
                raise ValueError("`system` needs to be an instance of `lti` "
                                 "or have 2, 3 or 4 arguments.")
        # __new__ was called from a subclass, let it call its own functions
        return super(lti, cls).__new__(cls)

    def __init__(self, *system):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        super().__init__(*system)

    def impulse(self, X0=None, T=None, N=None):
        """
        Return the impulse response of a continuous-time system.
        See `impulse` for details.
        """
        return impulse(self, X0=X0, T=T, N=N)

    def step(self, X0=None, T=None, N=None):
        """
        Return the step response of a continuous-time system.
        See `step` for details.
        """
        return step(self, X0=X0, T=T, N=N)

    def output(self, U, T, X0=None):
        """
        Return the response of a continuous-time system to input `U`.
        See `lsim` for details.
        """
        return lsim(self, U, T, X0=X0)

    def bode(self, w=None, n=100):
        """
        Calculate Bode magnitude and phase data of a continuous-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
        [dB] and phase [deg]. See `bode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt

        >>> sys = signal.TransferFunction([1], [1, 1])
        >>> w, mag, phase = sys.bode()

        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return bode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000):
        """
        Calculate the frequency response of a continuous-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude.
        See `freqresp` for details.
        """
        return freqresp(self, w=w, n=n)

    def to_discrete(self, dt, method='zoh', alpha=None):
        """Return a discretized version of the current system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti`
        """
        # Each continuous subclass overrides this with a real conversion;
        # the base implementation only signals the missing override.
        raise NotImplementedError('to_discrete is not implemented for this '
                                  'system class.')
class dlti(LinearTimeInvariant):
    r"""
    Discrete-time linear time invariant system base class.

    Parameters
    ----------
    *system: arguments
        The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
        The following gives the number of arguments and the corresponding
        discrete-time subclass that is created:

            * 2: `TransferFunction`:  (numerator, denominator)
            * 3: `ZerosPolesGain`: (zeros, poles, gain)
            * 4: `StateSpace`:  (A, B, C, D)

        Each argument can be an array or a sequence.
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to ``True``
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, lti

    Notes
    -----
    `dlti` instances do not exist directly. Instead, `dlti` creates an instance
    of one of its subclasses: `StateSpace`, `TransferFunction` or
    `ZerosPolesGain`.

    Changing the value of properties that are not directly part of the current
    system representation (such as the `zeros` of a `StateSpace` system) is
    very inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    If (numerator, denominator) is passed in for ``*system``, coefficients for
    both the numerator and denominator should be specified in descending
    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
    5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal

    >>> signal.dlti(1, 2, 3, 4)
    StateSpaceDiscrete(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: True
    )

    >>> signal.dlti(1, 2, 3, 4, dt=0.1)
    StateSpaceDiscrete(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: 0.1
    )

    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
    of 0.1 seconds:

    >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with
    a sampling time of 0.1 seconds:

    >>> signal.dlti([3, 4], [1, 2], dt=0.1)
    TransferFunctionDiscrete(
    array([3., 4.]),
    array([1., 2.]),
    dt: 0.1
    )

    """

    def __new__(cls, *system, **kwargs):
        """Create an instance of the appropriate subclass."""
        if cls is dlti:
            # Dispatch purely on argument count: 2 -> transfer function,
            # 3 -> zeros/poles/gain, 4 -> state space.
            N = len(system)
            if N == 2:
                return TransferFunctionDiscrete.__new__(
                    TransferFunctionDiscrete, *system, **kwargs)
            elif N == 3:
                return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
                                                      *system, **kwargs)
            elif N == 4:
                return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
                                                  **kwargs)
            else:
                raise ValueError("`system` needs to be an instance of `dlti` "
                                 "or have 2, 3 or 4 arguments.")
        # __new__ was called from a subclass, let it call its own functions
        return super(dlti, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        # `dt` defaults to True, meaning a discrete system with an
        # unspecified sampling time.
        dt = kwargs.pop('dt', True)
        super().__init__(*system, **kwargs)

        self.dt = dt

    @property
    def dt(self):
        """Return the sampling time of the system."""
        return self._dt

    @dt.setter
    def dt(self, dt):
        self._dt = dt

    def impulse(self, x0=None, t=None, n=None):
        """
        Return the impulse response of the discrete-time `dlti` system.
        See `dimpulse` for details.
        """
        return dimpulse(self, x0=x0, t=t, n=n)

    def step(self, x0=None, t=None, n=None):
        """
        Return the step response of the discrete-time `dlti` system.
        See `dstep` for details.
        """
        return dstep(self, x0=x0, t=t, n=n)

    def output(self, u, t, x0=None):
        """
        Return the response of the discrete-time system to input `u`.
        See `dlsim` for details.
        """
        return dlsim(self, u, t, x0=x0)

    def bode(self, w=None, n=100):
        r"""
        Calculate Bode magnitude and phase data of a discrete-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
        [dB] and phase [deg]. See `dbode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt

        Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}`
        with sampling time 0.5s:

        >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)

        Equivalent: signal.dbode(sys)

        >>> w, mag, phase = sys.bode()

        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return dbode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000, whole=False):
        """
        Calculate the frequency response of a discrete-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude.
        See `dfreqresp` for details.
        """
        return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
    r"""Linear Time Invariant system class in transfer function form.

    Represents the system as the continuous-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
    discrete-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    `TransferFunction` systems inherit additional
    functionality from the `lti`, respectively the `dlti` classes, depending on
    which system representation is used.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, lti, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
    represented as ``[1, 3, 5]``)

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den)
    TransferFunctionContinuous(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: None
    )

    Construct the transfer function
    :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
    0.1 seconds:

    >>> signal.TransferFunction(num, den, dt=0.1)
    TransferFunctionDiscrete(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: 0.1
    )

    """

    def __new__(cls, *system, **kwargs):
        """Handle object conversion if input is an instance of lti."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_tf()

        # Choose whether to inherit from `lti` or from `dlti`
        if cls is TransferFunction:
            # Presence of a `dt` keyword selects the discrete-time subclass.
            if kwargs.get('dt') is None:
                return TransferFunctionContinuous.__new__(
                    TransferFunctionContinuous,
                    *system,
                    **kwargs)
            else:
                return TransferFunctionDiscrete.__new__(
                    TransferFunctionDiscrete,
                    *system,
                    **kwargs)

        # No special conversion needed
        return super(TransferFunction, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the state space LTI system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return

        # Remove system arguments, not needed by parents anymore
        super().__init__(**kwargs)

        self._num = None
        self._den = None

        # `normalize` validates the coefficients and strips leading zeros.
        self.num, self.den = normalize(*system)

    def __repr__(self):
        """Return representation of the system's transfer function"""
        return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
            self.__class__.__name__,
            repr(self.num),
            repr(self.den),
            repr(self.dt),
            )

    @property
    def num(self):
        """Numerator of the `TransferFunction` system."""
        return self._num

    @num.setter
    def num(self, num):
        self._num = atleast_1d(num)

        # Update dimensions
        if len(self.num.shape) > 1:
            self.outputs, self.inputs = self.num.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def den(self):
        """Denominator of the `TransferFunction` system."""
        return self._den

    @den.setter
    def den(self, den):
        self._den = atleast_1d(den)

    def _copy(self, system):
        """
        Copy the parameters of another `TransferFunction` object

        Parameters
        ----------
        system : `TransferFunction`
            The `StateSpace` system that is to be copied
        """
        self.num = system.num
        self.den = system.den

    def to_tf(self):
        """
        Return a copy of the current `TransferFunction` system.

        Returns
        -------
        sys : instance of `TransferFunction`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_zpk(self):
        """
        Convert system representation to `ZerosPolesGain`.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*tf2zpk(self.num, self.den),
                              **self._dt_dict)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*tf2ss(self.num, self.den),
                          **self._dt_dict)

    @staticmethod
    def _z_to_zinv(num, den):
        """Change a transfer function from the variable `z` to `z**-1`.

        Parameters
        ----------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of descending degree of 'z'.
            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.

        Returns
        -------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of ascending degree of 'z**-1'.
            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
        """
        # Zero-pad the shorter polynomial on the left so both have equal
        # degree; the coefficient lists then read directly in z**-1 order.
        diff = len(num) - len(den)
        if diff > 0:
            den = np.hstack((np.zeros(diff), den))
        elif diff < 0:
            num = np.hstack((np.zeros(-diff), num))
        return num, den

    @staticmethod
    def _zinv_to_z(num, den):
        """Change a transfer function from the variable `z**-1` to `z`.

        Parameters
        ----------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of ascending degree of 'z**-1'.
            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.

        Returns
        -------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of descending degree of 'z'.
            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
        """
        # Zero-pad the shorter polynomial on the right; the coefficient
        # lists then read directly in descending powers of z.
        diff = len(num) - len(den)
        if diff > 0:
            den = np.hstack((den, np.zeros(diff)))
        elif diff < 0:
            num = np.hstack((num, np.zeros(-diff)))
        return num, den
class TransferFunctionContinuous(TransferFunction, lti):
    r"""
    Continuous-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    Continuous-time `TransferFunction` systems inherit additional
    functionality from the `lti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 2: array_like: (numerator, denominator)

    See Also
    --------
    ZerosPolesGain, StateSpace, lti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
    ``[1, 3, 5]``)

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den)
    TransferFunctionContinuous(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: None
    )

    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `TransferFunction` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `StateSpace`
        """
        # cont2discrete returns (num, den, dt); the trailing dt is dropped
        # with [:-1] because it is passed explicitly as a keyword instead.
        return TransferFunction(*cont2discrete((self.num, self.den),
                                               dt,
                                               method=method,
                                               alpha=alpha)[:-1],
                                dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
    r"""
    Discrete-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    Discrete-time `TransferFunction` systems inherit additional functionality
    from the `dlti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
    ``[1, 3, 5]``).

    Examples
    --------
    Construct the transfer function
    :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
    0.5 seconds:

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den, 0.5)
    TransferFunctionDiscrete(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: 0.5
    )

    """
    # All behavior comes from TransferFunction (representation) and dlti
    # (discrete-time responses); this class only fixes the inheritance.
    pass
class ZerosPolesGain(LinearTimeInvariant):
    r"""
    Linear Time Invariant system class in zeros, poles, gain form.

    Represents the system as the continuous- or discrete-time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    `ZerosPolesGain` systems inherit additional functionality from the `lti`,
    respectively the `dlti` classes, depending on which system representation
    is used.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, StateSpace, lti, dlti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
    of 0.1 seconds:

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    """

    def __new__(cls, *system, **kwargs):
        """Handle object conversion if input is an instance of `lti`"""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_zpk()

        # Choose whether to inherit from `lti` or from `dlti`
        if cls is ZerosPolesGain:
            # Presence of a `dt` keyword selects the discrete-time subclass.
            if kwargs.get('dt') is None:
                return ZerosPolesGainContinuous.__new__(
                    ZerosPolesGainContinuous,
                    *system,
                    **kwargs)
            else:
                return ZerosPolesGainDiscrete.__new__(
                    ZerosPolesGainDiscrete,
                    *system,
                    **kwargs
                    )

        # No special conversion needed
        return super(ZerosPolesGain, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the zeros, poles, gain system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return

        super().__init__(**kwargs)

        self._zeros = None
        self._poles = None
        self._gain = None

        self.zeros, self.poles, self.gain = system

    def __repr__(self):
        """Return representation of the `ZerosPolesGain` system."""
        return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
            self.__class__.__name__,
            repr(self.zeros),
            repr(self.poles),
            repr(self.gain),
            repr(self.dt),
            )

    @property
    def zeros(self):
        """Zeros of the `ZerosPolesGain` system."""
        return self._zeros

    @zeros.setter
    def zeros(self, zeros):
        self._zeros = atleast_1d(zeros)

        # Update dimensions
        if len(self.zeros.shape) > 1:
            self.outputs, self.inputs = self.zeros.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def poles(self):
        """Poles of the `ZerosPolesGain` system."""
        return self._poles

    @poles.setter
    def poles(self, poles):
        self._poles = atleast_1d(poles)

    @property
    def gain(self):
        """Gain of the `ZerosPolesGain` system."""
        return self._gain

    @gain.setter
    def gain(self, gain):
        self._gain = gain

    def _copy(self, system):
        """
        Copy the parameters of another `ZerosPolesGain` system.

        Parameters
        ----------
        system : instance of `ZerosPolesGain`
            The zeros, poles gain system that is to be copied
        """
        self.poles = system.poles
        self.zeros = system.zeros
        self.gain = system.gain

    def to_tf(self):
        """
        Convert system representation to `TransferFunction`.

        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
                                **self._dt_dict)

    def to_zpk(self):
        """
        Return a copy of the current 'ZerosPolesGain' system.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
                          **self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
    r"""
    Continuous-time Linear Time Invariant system in zeros, poles, gain form.

    Represents the system as the continuous time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Continuous-time `ZerosPolesGain` systems inherit additional functionality
    from the `lti` class.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)

    See Also
    --------
    TransferFunction, StateSpace, lti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    Examples
    --------
    Construct the transfer function
    :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `ZerosPolesGain` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `ZerosPolesGain`
        """
        # cont2discrete returns (zeros, poles, gain, dt); the trailing dt is
        # dropped with [:-1] because it is passed explicitly as a keyword.
        return ZerosPolesGain(
            *cont2discrete((self.zeros, self.poles, self.gain),
                           dt,
                           method=method,
                           alpha=alpha)[:-1],
            dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
    r"""
    Discrete-time Linear Time Invariant system in zeros, poles, gain form.

    Represents the system as the discrete-time transfer function
    :math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Discrete-time `ZerosPolesGain` systems inherit additional functionality
    from the `dlti` class.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:

            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, StateSpace, dlti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:

    >>> from scipy import signal

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
    of 0.1 seconds:

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    """
    # All behavior comes from ZerosPolesGain (representation) and dlti
    # (discrete-time responses); this class only fixes the inheritance.
    pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
inherit additional functionality from the `lti`, respectively the `dlti`
classes, depending on which system representation is used.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
# Override NumPy binary operations and ufuncs
__array_priority__ = 100.0
__array_ufunc__ = None
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
def _check_binop_other(self, other):
return isinstance(other, (StateSpace, np.ndarray, float, complex,
np.number, int))
def __mul__(self, other):
"""
Post-multiply another system or a scalar
Handles multiplication of systems in the sense of a frequency domain
multiplication. That means, given two systems E1(s) and E2(s), their
multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s)
is equivalent to first applying E2(s), and then E1(s).
Notes
-----
For SISO systems the order of system application does not matter.
However, for MIMO systems, where the two systems are matrices, the
order above ensures standard Matrix multiplication rules apply.
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
return NotImplemented
if self.dt != other.dt:
raise TypeError('Cannot multiply systems with different `dt`.')
n1 = self.A.shape[0]
n2 = other.A.shape[0]
# Interconnection of systems
# x1' = A1 x1 + B1 u1
# y1 = C1 x1 + D1 u1
# x2' = A2 x2 + B2 y1
# y2 = C2 x2 + D2 y1
#
# Plugging in with u1 = y2 yields
# [x1'] [A1 B1*C2 ] [x1] [B1*D2]
# [x2'] = [0 A2 ] [x2] + [B2 ] u2
# [x1]
# y2 = [C1 D1*C2] [x2] + D1*D2 u2
a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
np.hstack((zeros((n2, n1)), other.A))))
b = np.vstack((np.dot(self.B, other.D), other.B))
c = np.hstack((self.C, np.dot(self.D, other.C)))
d = np.dot(self.D, other.D)
else:
# Assume that other is a scalar / matrix
# For post multiplication the input gets scaled
a = self.A
b = np.dot(self.B, other)
c = self.C
d = np.dot(self.D, other)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __rmul__(self, other):
"""Pre-multiply a scalar or matrix (but not StateSpace)"""
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
# For pre-multiplication only the output gets scaled
a = self.A
b = self.B
c = np.dot(other, self.C)
d = np.dot(other, self.D)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __neg__(self):
"""Negate the system (equivalent to pre-multiplying by -1)."""
return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)
    def __add__(self, other):
        """
        Adds two systems in the sense of frequency domain addition.

        Returns the parallel interconnection ``y = (E1 + E2) u`` when `other`
        is a compatible `StateSpace`, or adds a static feedthrough when
        `other` is a scalar/matrix matching ``D``'s shape.
        """
        if not self._check_binop_other(other):
            return NotImplemented

        if isinstance(other, StateSpace):
            # Disallow mix of discrete and continuous systems.
            if type(other) is not type(self):
                raise TypeError('Cannot add {} and {}'.format(type(self),
                                                              type(other)))

            if self.dt != other.dt:
                raise TypeError('Cannot add systems with different `dt`.')
            # Interconnection of systems
            # x1' = A1 x1 + B1 u
            # y1  = C1 x1 + D1 u
            # x2' = A2 x2 + B2 u
            # y2  = C2 x2 + D2 u
            # y = y1 + y2
            #
            # Plugging in yields
            # [x1'] [A1 0 ] [x1] [B1]
            # [x2'] = [0  A2] [x2] + [B2] u
            #                 [x1]
            #  y    = [C1 C2] [x2] + [D1 + D2] u
            a = linalg.block_diag(self.A, other.A)
            b = np.vstack((self.B, other.B))
            c = np.hstack((self.C, other.C))
            d = self.D + other.D
        else:
            other = np.atleast_2d(other)
            if self.D.shape == other.shape:
                # A scalar/matrix is really just a static system (A=0, B=0, C=0)
                a = self.A
                b = self.B
                c = self.C
                d = self.D + other
            else:
                raise ValueError("Cannot add systems with incompatible "
                                 "dimensions ({} and {})"
                                 .format(self.D.shape, other.shape))

        # NOTE(review): np.find_common_type is deprecated in NumPy >= 1.25;
        # np.result_type is the suggested replacement — verify promotion
        # rules match before switching.
        common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
        return StateSpace(np.asarray(a, dtype=common_dtype),
                          np.asarray(b, dtype=common_dtype),
                          np.asarray(c, dtype=common_dtype),
                          np.asarray(d, dtype=common_dtype),
                          **self._dt_dict)
def __sub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(-other)
def __radd__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(other)
def __rsub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return (-self).__add__(other)
def __truediv__(self, other):
"""
Divide by a scalar
"""
# Division by non-StateSpace scalars
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
if isinstance(other, np.ndarray) and other.ndim > 0:
# It's ambiguous what this means, so disallow it
raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
return self.__mul__(1/other)
    @property
    def A(self):
        """State matrix of the `StateSpace` system."""
        return self._A

    @A.setter
    def A(self, A):
        # Coerce to a 2-D array (or pass None through) before storing.
        self._A = _atleast_2d_or_none(A)

    @property
    def B(self):
        """Input matrix of the `StateSpace` system."""
        return self._B

    @B.setter
    def B(self, B):
        self._B = _atleast_2d_or_none(B)
        # The number of system inputs is derived from B's trailing dimension.
        self.inputs = self.B.shape[-1]

    @property
    def C(self):
        """Output matrix of the `StateSpace` system."""
        return self._C

    @C.setter
    def C(self, C):
        self._C = _atleast_2d_or_none(C)
        # The number of system outputs is derived from C's leading dimension.
        self.outputs = self.C.shape[0]

    @property
    def D(self):
        """Feedthrough matrix of the `StateSpace` system."""
        return self._D

    @D.setter
    def D(self, D):
        self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
    r"""
    Continuous-time Linear Time Invariant system in state-space form.

    Represents the system as the continuous-time, first order differential
    equation :math:`\dot{x} = A x + B u`.
    Continuous-time `StateSpace` systems inherit additional functionality
    from the `lti` class.

    Parameters
    ----------
    *system : arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
        The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)

    See Also
    --------
    TransferFunction, ZerosPolesGain, lti
    ss2zpk, ss2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[0, 1], [0, 0]])
    >>> b = np.array([[0], [1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> signal.StateSpace(a, b, c, d)
    StateSpaceContinuous(
    array([[0, 1],
           [0, 0]]),
    array([[0],
           [1]]),
    array([[1, 0]]),
    array([[0]]),
    dt: None
    )
    """
    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `StateSpace` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `StateSpace`
        """
        # cont2discrete returns (A, B, C, D, dt); drop the trailing dt and
        # hand the sampling interval to the constructor explicitly.
        discrete = cont2discrete((self.A, self.B, self.C, self.D),
                                 dt,
                                 method=method,
                                 alpha=alpha)
        return StateSpace(*discrete[:-1], dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
    r"""
    Discrete-time Linear Time Invariant system in state-space form.

    Represents the system as the discrete-time difference equation
    :math:`x[k+1] = A x[k] + B u[k]`.
    `StateSpace` systems inherit additional functionality from the `dlti`
    class.

    Parameters
    ----------
    *system : arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
        The following gives the number of input arguments and their
        interpretation:

            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    dt : float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, ZerosPolesGain, dlti
    ss2zpk, ss2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[1, 0.1], [0, 1]])
    >>> b = np.array([[0.005], [0.1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> signal.StateSpace(a, b, c, d, dt=0.1)
    StateSpaceDiscrete(
    array([[ 1. ,  0.1],
           [ 0. ,  1. ]]),
    array([[ 0.005],
           [ 0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    """
    # All behaviour is inherited from `StateSpace` and `dlti`.
    pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like (1D or 2D), optional
        Input at each time in `T`, linearly interpolated between the given
        times.  For multiple inputs, each column of the rank-2 array is one
        input.  If not given, the input is assumed to be zero.
    T : array_like (1D or 2D), optional
        Times at which the input is defined and at which the output is
        desired.  Defaults to 101 evenly spaced points on [0, 10.0].
    X0 : array_like (1D), optional
        Initial condition of the state vector (zero if not given).
    kwargs : dict
        Additional keyword arguments, passed through to
        `scipy.integrate.odeint`; see its documentation for details.

    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    See Also
    --------
    lsim, scipy.integrate.odeint
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()

    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)

    if T is None:
        # T should arguably be a required argument, but for backward
        # compatibility a default grid on [0, 10] is supplied when omitted.
        T = linspace(0, 10.0, 101)

    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    if U is None:
        # Autonomous system: integrate x' = A x and read out y = C x.
        def rhs(x, t, sys):
            """The vector field of the unforced linear system."""
            return dot(sys.A, x)

        xout = integrate.odeint(rhs, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))
        return T, squeeze(transpose(yout)), xout

    U = atleast_1d(U)
    if len(U.shape) == 1:
        U = U.reshape(-1, 1)
    sU = U.shape
    if sU[0] != len(T):
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")
    if sU[1] != sys.inputs:
        raise ValueError("The number of inputs in U (%d) is not "
                         "compatible with the number of system "
                         "inputs (%d)" % (sU[1], sys.inputs))
    # Input between samples is obtained by linear interpolation; values
    # outside [T[0], T[-1]] become NaN and are zeroed via nan_to_num below.
    ufunc = interpolate.interp1d(T, U, kind='linear',
                                 axis=0, bounds_error=False)

    def rhs(x, t, sys, ufunc):
        """The vector field of the forced linear system."""
        return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))

    xout = integrate.odeint(rhs, X0, T, args=(sys, ufunc), **kwargs)
    yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
    return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float64):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
    """
    Simulate output of a continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like
        Input at each time in `T` (interpolation is assumed between given
        times).  For multiple inputs, each column of the rank-2 array is one
        input.  If U = 0 or None, a zero input is used.
    T : array_like
        Times at which the input is defined and at which the output is
        desired.  Must be nonnegative, increasing, and equally spaced.
    X0 : array_like, optional
        Initial conditions on the state vector (zero by default).
    interp : bool, optional
        Whether to use linear (True, the default) or zero-order-hold (False)
        interpolation for the input array.

    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time evolution of the state vector.

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
    n_states = A.shape[0]
    n_inputs = B.shape[1]

    n_steps = T.size
    if X0 is None:
        X0 = zeros(n_states, sys.A.dtype)
    xout = np.empty((n_steps, n_states), sys.A.dtype)

    if T[0] == 0:
        xout[0] = X0
    elif T[0] > 0:
        # Step forward to the initial time with zero input.  The state is
        # kept as a row vector, hence the transposed exponential applied on
        # the right.
        xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
    else:
        raise ValueError("Initial time must be nonnegative")

    no_input = (U is None or
                (isinstance(U, (int, float)) and U == 0.) or
                not np.any(U))

    if n_steps == 1:
        yout = squeeze(dot(xout, transpose(C)))
        if not no_input:
            yout += squeeze(dot(U, transpose(D)))
        return T, squeeze(yout), squeeze(xout)

    dt = T[1] - T[0]
    if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
        warnings.warn("Non-uniform timesteps are deprecated. Results may be "
                      "slow and/or inaccurate.", DeprecationWarning)
        return lsim2(system, U, T, X0)

    if no_input:
        # Homogeneous system: propagate the state with a single matrix
        # exponential (transposed because the state is a row vector).
        expAT_dt = linalg.expm(transpose(A) * dt)
        for k in range(1, n_steps):
            xout[k] = dot(xout[k - 1], expAT_dt)
        yout = squeeze(dot(xout, transpose(C)))
        return T, squeeze(yout), squeeze(xout)

    # Nonzero input
    U = atleast_1d(U)
    if U.ndim == 1:
        U = U[:, np.newaxis]

    if U.shape[0] != n_steps:
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")

    if U.shape[1] != n_inputs:
        raise ValueError("System does not define that many inputs.")

    if not interp:
        # Zero-order hold
        # Algorithm: to integrate from time 0 to time dt, we solve
        #   xdot = A x + B u,  x(0) = x0
        #   udot = 0,          u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt ] [ x0 ]
        #   [ u(dt) ] = exp [  0     0   ] [ u0 ]
        M = np.vstack([np.hstack([A * dt, B * dt]),
                       np.zeros((n_inputs, n_states + n_inputs))])
        # transpose everything because the state and input are row vectors
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd = expMT[n_states:, :n_states]
        for k in range(1, n_steps):
            xout[k] = dot(xout[k - 1], Ad) + dot(U[k - 1], Bd)
    else:
        # Linear interpolation between steps
        # Algorithm: to integrate from time 0 to time dt, with linear
        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
        #   xdot = A x + B u,        x(0) = x0
        #   udot = (u1 - u0) / dt,   u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
        M = np.vstack([np.hstack([A * dt, B * dt,
                                  np.zeros((n_states, n_inputs))]),
                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
                                  np.identity(n_inputs)]),
                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for k in range(1, n_steps):
            xout[k] = (dot(xout[k - 1], Ad) + dot(U[k - 1], Bd0) +
                       dot(U[k], Bd1))

    yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector. Defaults to zero.
    T : array_like, optional
        Time points. Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except
        for singularities at zero).

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()

    # A unit impulse at t=0 is equivalent to starting from initial state B
    # (plus any user-supplied X0) with zero subsequent input.
    if X0 is None:
        X = squeeze(sys.B)
    else:
        X = squeeze(sys.B + X0)

    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)

    _, h, _ = lsim(sys, 0., T, X, interp=False)
    return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    The solution is generated by calling `scipy.signal.lsim2`, which uses
    the differential equation solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : 1-D array_like, optional
        Initial condition of the state vector.  Default: 0 (the zero vector).
    T : 1-D array_like, optional
        Times at which the output is desired; generated automatically when
        not given.
    N : int, optional
        Number of time points to compute. Default: 100.
    kwargs : various types
        Additional keyword arguments, passed on via `scipy.signal.lsim2` to
        `scipy.integrate.odeint`.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, scipy.integrate.odeint

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()

    B = sys.B
    if B.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    B = B.squeeze()

    if X0 is None:
        X0 = zeros_like(B)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)

    # The input impulse is equivalent to an offset of B in the initial
    # conditions; solve the resulting autonomous problem with lsim2().
    Tr, Yr, _ = lsim2(sys, T=T, X0=B + X0, **kwargs)
    return Tr, Yr
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()

    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)

    # A unit-step input is exactly represented by zero-order hold.
    unit_input = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim(sys, unit_input, T, X0=X0, interp=False)
    return tout, yout
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    This function is functionally the same as `scipy.signal.step`, but
    it uses the function `scipy.signal.lsim2` to compute the step
    response.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.
    kwargs : various types
        Additional keyword arguments, passed on via `scipy.signal.lsim2`
        to `scipy.integrate.odeint`.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()

    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)

    unit_input = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim2(sys, unit_input, T, X0=X0, **kwargs)
    return tout, yout
def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s) at which magnitude and phase are
        calculated.  A reasonable set is chosen when not given.
    n : int, optional
        Number of frequency points to compute if `w` is not given, spaced
        logarithmically over an interval covering the system's poles and
        zeros.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.11.0
    """
    w, resp = freqresp(system, w=w, n=n)

    # Magnitude in decibels; phase unwrapped and converted to degrees.
    mag = 20.0 * numpy.log10(abs(resp))
    phase_rad = numpy.unwrap(numpy.arctan2(resp.imag, resp.real))
    phase = phase_rad * 180.0 / numpy.pi

    return w, mag, phase
def freqresp(system, w=None, n=10000):
    r"""Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s) at which the response is calculated.
        A reasonable set is chosen when not given.
    n : int, optional
        Number of frequency points to compute if `w` is not given, spaced
        logarithmically over an interval covering the system's poles and
        zeros.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        # Already TF/ZPK systems are used as-is; state-space systems are
        # converted to ZPK form first.
        if isinstance(system, (TransferFunction, ZerosPolesGain)):
            sys = system
        else:
            sys = system._as_zpk()
    elif isinstance(system, dlti):
        raise AttributeError('freqresp can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_zpk()

    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")

    worN = n if w is None else w

    if isinstance(sys, TransferFunction):
        # sys.num.ravel() is used because there are cases where sys.num is
        # a 2-D array with a single row.
        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
    elif isinstance(sys, ZerosPolesGain):
        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)

    return w, h
# This class will be used by place_poles to return its results
# see https://code.activestate.com/recipes/52308/
class Bunch:
    """Simple attribute container used by `place_poles` to return results."""

    def __init__(self, **kwds):
        # Each keyword argument becomes an attribute of the instance.
        for key, value in kwds.items():
            setattr(self, key, value)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """
    Validate the arguments of `place_poles`.

    Checks that the poles come in complex conjugate pairs, that the shapes
    of ``A``, ``B`` and ``poles`` are compatible, and that the chosen
    ``method`` supports the provided poles.

    Returns
    -------
    update_loop : callable
        The iteration routine to use (``_YT_loop`` or ``_KNV0_loop``).
    poles : 1-D ndarray
        The poles reordered as real poles first, then conjugate pairs.
    """
    poles = np.asarray(poles)
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugates pairs
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    if A.shape[0] != A.shape[1]:
        raise ValueError("A must be square")
    n_states = A.shape[0]
    n_poles = len(poles)
    if n_poles > n_states:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (n_states, n_poles))
    if n_poles < n_states:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (n_poles, n_states))
    # A pole repeated more than rank(B) times cannot be placed.
    rank_b = np.linalg.matrix_rank(B)
    for pole in poles:
        if np.sum(pole == poles) > rank_b:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")
    # Choose the update method; KNV0 cannot handle complex poles.
    if method not in ('KNV0', 'YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
    if method == "KNV0":
        update_loop = _KNV0_loop
        if not all(np.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")
    else:
        update_loop = _YT_loop
    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")
    # rtol <= 0 is deliberately allowed: a negative rtol forces maxiter
    # iterations.
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")
    return update_loop, poles
def _order_complex_poles(poles):
"""
Check we have complex conjugates pairs and reorder P according to YT, ie
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
    """
    Algorithm "KNV0" Kautsky et Al. Robust pole
    assignment in linear state feedback, Int journal of Control
    1985, vol 41 p 1129->1155
    https://la.epfl.ch/files/content/sites/la/files/
    users/105941/public/KautskyNicholsDooren

    Rank-1 update: recomputes column ``j`` of ``transfer_matrix`` in place,
    replacing it with a unit vector taken from the null space ``ker_pole[j]``.

    Parameters
    ----------
    B : 2-D ndarray
        Input matrix of the system; only referenced by the commented-out
        experimental complex support below, kept for signature parity with
        the other update routines.
    ker_pole : list of 2-D ndarray
        One orthonormal null-space basis per pole.
    transfer_matrix : 2-D ndarray
        Candidate transfer matrix, column ``j`` is modified in place.
    j : int
        Index of the pole/column to update.
    poles : 1-D ndarray
        Requested poles (KNV0 only supports real poles, see note below).
    """
    # Remove xj from the base
    transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
    # If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonnal to
    # Q0, that's what we are looking for !
    # After merge of gh-4249 great speed improvements could be achieved
    # using QR updates instead of full QR in the line below
    # To debug with numpy qr uncomment the line below
    # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
    Q, R = s_qr(transfer_matrix_not_j, mode="full")
    # Project Q's last column (orthogonal to all other columns) onto the
    # null space associated with pole j.
    mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
    yj = np.dot(mat_ker_pj, Q[:, -1])
    # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
    # projection into ker_pole[j] will yield a vector
    # close to 0. As we are looking for a vector in ker_pole[j]
    # simply stick with transfer_matrix[:, j] (unless someone provides me with
    # a better choice ?)
    if not np.allclose(yj, 0):
        xj = yj/np.linalg.norm(yj)
        transfer_matrix[:, j] = xj
    # KNV does not support complex poles, using YT technique the two lines
    # below seem to work 9 out of 10 times but it is not reliable enough:
    # transfer_matrix[:, j]=real(xj)
    # transfer_matrix[:, j+1]=imag(xj)
    # Add this at the beginning of this function if you wish to test
    # complex support:
    # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
    #     return
    # Problems arise when imag(xj)=>0 I have no idea on how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.1 page 19 related to real pairs

    Rank-2 update: recomputes columns ``i`` and ``j`` of ``transfer_matrix``
    in place (both columns belong to real poles), using the null-space
    bases ``ker_pole[i]``/``ker_pole[j]`` and ``Q``, whose last two columns
    are orthogonal to the remaining columns of ``transfer_matrix``.
    """
    # step 1 page 19
    u = Q[:, -2, np.newaxis]
    v = Q[:, -1, np.newaxis]
    # step 2 page 19
    m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
               np.dot(v, u.T)), ker_pole[j])
    # step 3 page 19
    um, sm, vm = np.linalg.svd(m)
    # mu1, mu2 two first columns of U => 2 first lines of U.T
    mu1, mu2 = um.T[:2, :, np.newaxis]
    # VM is V.T with numpy we want the first two lines of V.T
    nu1, nu2 = vm[:2, :, np.newaxis]
    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
        transfer_matrix[:, i, np.newaxis],
        transfer_matrix[:, j, np.newaxis]))
    # The two branches distinguish distinct vs (nearly) equal leading
    # singular values, as prescribed by the paper.
    if not np.allclose(sm[0], sm[1]):
        ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
        ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
        ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
    else:
        ker_pole_ij = np.vstack((
                            np.hstack((ker_pole[i],
                                       np.zeros(ker_pole[i].shape))),
                            np.hstack((np.zeros(ker_pole[j].shape),
                                       ker_pole[j]))
                            ))
        mu_nu_matrix = np.vstack(
            (np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
            )
        ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
    transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
                                transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_ij, 0):
        transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
                              np.linalg.norm(transfer_matrix_ij))
        transfer_matrix[:, i] = transfer_matrix_ij[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = transfer_matrix_ij[
            transfer_matrix[:, i].shape[0]:, 0
            ]
    else:
        # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
        # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
        # ker_pole_mu_nu and iterate. As we are looking for a vector in
        # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
        # (that's a guess, not a claim !)
        transfer_matrix[:, i] = ker_pole_mu_nu[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = ker_pole_mu_nu[
            transfer_matrix[:, i].shape[0]:, 0
            ]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.2 page 20 related to complex pairs

    Rank-2 update for one complex conjugate pair: columns ``i`` and ``j``
    of ``transfer_matrix`` hold the real and imaginary parts of the complex
    transfer vector, and are both recomputed in place.
    """
    # step 1 page 20
    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
    u = ur + 1j*ui
    # step 2 page 20
    ker_pole_ij = ker_pole[i]
    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
               np.dot(np.conj(u), u.T)), ker_pole_ij)
    # step 3 page 20
    e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their modulus
    e_val_idx = np.argsort(np.abs(e_val))
    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    # remember transfer_matrix_i has been split as
    # transfer_matrix[i]=real(transfer_matrix_i) and
    # transfer_matrix[j]=imag(transfer_matrix_i)
    transfer_matrix_j_mo_transfer_matrix_j = (
        transfer_matrix[:, i, np.newaxis] +
        1j*transfer_matrix[:, j, np.newaxis]
        )
    # Branch on whether the two dominant eigenvalue moduli are distinct,
    # mirroring the singular-value test in the real-pair case.
    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
                       np.abs(e_val[e_val_idx[-2]])):
        ker_pole_mu = np.dot(ker_pole_ij, mu1)
    else:
        mu1_mu2_matrix = np.hstack((mu1, mu2))
        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
                                 transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_i_j, 0):
        transfer_matrix_i_j = (transfer_matrix_i_j /
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    https://hdl.handle.net/1903/5598
    The poles P have to be sorted accordingly to section 6.2 page 20

    Iterates rank-2 updates (`_YT_real`/`_YT_complex`) — and `_KNV0` for a
    lone real pole — over ``transfer_matrix`` in place, until the relative
    change of ``abs(det(transfer_matrix))`` drops below ``rtol`` or
    ``maxiter`` sweeps have been performed.

    Returns
    -------
    stop : bool
        True when the convergence criterion was met.
    cur_rtol : float
        Relative change of ``abs(det(transfer_matrix))`` over the last sweep.
    nb_try : int
        Number of sweeps performed.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2
    # Stick to the indices in the paper and then remove one to get numpy array
    # index it is a bit easier to link the code to the paper this way even if it
    # is not very clean. The paper is unclear about what should be done when
    # there is only one real pole => use KNV0 on this real pole seem to work
    if nb_real > 0:
        # update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[],[]]
    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
        update_order[0].extend(r_comp)
        update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
        update_order[0].extend(r_comp)
        update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
        update_order[0].extend(r_comp)
        update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
        update_order[0].extend(r_comp)
        update_order[1].extend(r_comp+1)
    # Convert the paper's 1-based index pairs to 0-based (row per pair).
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line below
                # to debug with numpy qr uncomment the line below
                # Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)
        # Guard the denominator away from zero with sqrt(eps).
        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Repeatedly sweep over all poles, applying KNV method 0 to each column
    of ``transfer_matrix`` (in place) until the relative change of
    ``abs(det(transfer_matrix))`` drops below ``rtol`` or ``maxiter``
    sweeps have been performed.

    Kept separate from `_KNV0` itself because the YT loop needs to call
    `_KNV0` on a single pole without sweeping over all of them.

    Returns ``(converged, cur_rtol, nb_sweeps)``.
    """
    converged = False
    nb_sweeps = 0
    sqrt_eps = np.sqrt(np.spacing(1))
    while nb_sweeps < maxiter and not converged:
        previous_det = np.abs(np.linalg.det(transfer_matrix))
        for col in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, col, poles)
        # Keep the denominator away from zero with sqrt(eps).
        current_det = np.max((sqrt_eps,
                              np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((current_det - previous_det) / current_det)
        if cur_rtol < rtol and current_det > sqrt_eps:
            # Convergence test from YT page 21
            converged = True
        nb_sweeps += 1
    return converged, cur_rtol, nb_sweeps
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
    """
    Compute K such that eigenvalues (A - dot(B, K))=poles.
    K is the gain matrix such as the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
    as close as possible to those asked for in poles.
    SISO, MISO and MIMO systems are supported.
    Parameters
    ----------
    A, B : ndarray
        State-space representation of linear system ``AX + BU``.
    poles : array_like
        Desired real poles and/or complex conjugates poles.
        Complex poles are only supported with ``method="YT"`` (default).
    method: {'YT', 'KNV0'}, optional
        Which method to choose to find the gain matrix K. One of:
            - 'YT': Yang Tits
            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0
        See References and Notes for details on the algorithms.
    rtol: float, optional
        After each iteration the determinant of the eigenvectors of
        ``A - B*K`` is compared to its previous value, when the relative
        error between these two values becomes lower than `rtol` the algorithm
        stops. Default is 1e-3.
    maxiter: int, optional
        Maximum number of iterations to compute the gain matrix.
        Default is 30.
    Returns
    -------
    full_state_feedback : Bunch object
        full_state_feedback is composed of:
            gain_matrix : 1-D ndarray
                The closed loop matrix K such as the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
            computed_poles : 1-D ndarray
                The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex conjugates in
                lexicographic order.
            requested_poles : 1-D ndarray
                The poles the algorithm was asked to place sorted as above,
                they may differ from what was achieved.
            X : 2-D ndarray
                The transfer matrix such as ``X * diag(poles) = (A - B*K)*X``
                (see Notes)
            rtol : float
                The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.
            nb_iter : int
                The number of iterations performed before converging.
                `nb_iter` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.
    Notes
    -----
    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
    al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
    rank-2 updates. This yields on average more robust solutions (see [2]_
    pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
    does not in its original version. Only update method 0 proposed by KNV has
    been implemented here, hence the name ``'KNV0'``.
    KNV extended to complex poles is used in Matlab's ``place`` function, YT is
    distributed under a non-free licence by Slicot under the name ``robpole``.
    It is unclear and undocumented how KNV0 has been extended to complex poles
    (Tits and Yang claim on page 14 of their paper that their method can not be
    used to extend KNV to complex poles), therefore only YT supports them in
    this implementation.
    As the solution to the problem of pole placement is not unique for MIMO
    systems, both methods start with a tentative transfer matrix which is
    altered in various way to increase its determinant. Both methods have been
    proven to converge to a stable solution, however depending on the way the
    initial transfer matrix is chosen they will converge to different
    solutions and therefore there is absolutely no guarantee that using
    ``'KNV0'`` will yield results similar to Matlab's or any other
    implementation of these algorithms.
    Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
    is only provided because it is needed by ``'YT'`` in some specific cases.
    Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
    when ``abs(det(X))`` is used as a robustness indicator.
    [2]_ is available as a technical report on the following URL:
    https://hdl.handle.net/1903/5598
    References
    ----------
    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
           in linear state feedback", International Journal of Control, Vol. 41
           pp. 1129-1155, 1985.
    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
           pole assignment by state feedback", IEEE Transactions on Automatic
           Control, Vol. 41, pp. 1432-1452, 1996.
    Examples
    --------
    A simple example demonstrating real pole placement using both KNV and YT
    algorithms. This is example number 1 from section 4 of the reference KNV
    publication ([1]_):
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
    ...               [-0.5814, -4.290, 0, 0.6750 ],
    ...               [ 1.067, 4.273, -6.654, 5.893 ],
    ...               [ 0.0480, 4.273, 1.343, -2.104 ]])
    >>> B = np.array([[ 0, 5.679 ],
    ...               [ 1.136, 1.136 ],
    ...               [ 0, 0, ],
    ...               [-3.146, 0 ]])
    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
    Now compute K with KNV method 0, with the default YT method and with the YT
    method while forcing 100 iterations of the algorithm and print some results
    after each call.
    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
    >>> fsf1.gain_matrix
    array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
           [ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
    >>> fsf2.computed_poles
    array([-8.6659, -5.0566, -0.5 , -0.2 ])
    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
    >>> fsf3.X
    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
           [-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
           [ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
    The absolute value of the determinant of X is a good indicator to check the
    robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
    it. Below a comparison of the robustness of the results above:
    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
    True
    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
    True
    Now a simple example for complex poles:
    >>> A = np.array([[ 0, 7/3., 0, 0 ],
    ...               [ 0, 0, 0, 7/9. ],
    ...               [ 0, 0, 0, 0 ],
    ...               [ 0, 0, 0, 0 ]])
    >>> B = np.array([[ 0, 0 ],
    ...               [ 0, 0 ],
    ...               [ 1, 0 ],
    ...               [ 0, 1 ]])
    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
    >>> fsf = signal.place_poles(A, B, P, method='YT')
    We can plot the desired and computed poles in the complex plane:
    >>> t = np.linspace(0, 2*np.pi, 401)
    >>> plt.plot(np.cos(t), np.sin(t), 'k--')  # unit circle
    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
    ...          'wo', label='Desired')
    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
    ...          label='Placed')
    >>> plt.grid()
    >>> plt.axis('image')
    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
    """
    # Move away all the inputs checking, it only adds noise to the code
    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
    # The current value of the relative tolerance we achieved
    cur_rtol = 0
    # The number of iterations needed before converging
    nb_iter = 0
    # Step A: QR decomposition of B page 1132 KN
    # to debug with numpy qr uncomment the line below
    # u, z = np.linalg.qr(B, mode="complete")
    u, z = s_qr(B, mode="full")
    rankB = np.linalg.matrix_rank(B)
    u0 = u[:, :rankB]
    u1 = u[:, rankB:]
    z = z[:rankB, :]
    # If we can use the identity matrix as X the solution is obvious
    if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
        # i.e K=inv(B)*(diag(P)-A)
        # if B has as many lines as its rank (but not square) there are many
        # solutions and we can choose one using least squares
        # => use lstsq in both cases.
        # In both cases the transfer matrix X will be eye(A.shape[0]) and I
        # can hardly think of a better one so there is nothing to optimize
        #
        # for complex poles we use the following trick
        #
        # |a -b| has for eigenvalues a+b and a-b
        # |b a|
        #
        # |a+bi 0| has the obvious eigenvalues a+bi and a-bi
        # |0 a-bi|
        #
        # e.g solving the first one in R gives the solution
        # for the second one in C
        diag_poles = np.zeros(A.shape)
        idx = 0
        while idx < poles.shape[0]:
            p = poles[idx]
            diag_poles[idx, idx] = np.real(p)
            if ~np.isreal(p):
                diag_poles[idx, idx+1] = -np.imag(p)
                diag_poles[idx+1, idx+1] = np.real(p)
                diag_poles[idx+1, idx] = np.imag(p)
                idx += 1  # skip next one
            idx += 1
        gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
        transfer_matrix = np.eye(A.shape[0])
        cur_rtol = np.nan
        nb_iter = np.nan
    else:
        # step A (p1144 KNV) and beginning of step F: decompose
        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
        # in the same loop
        ker_pole = []
        # flag to skip the conjugate of a complex pole
        skip_conjugate = False
        # select orthonormal base ker_pole for each Pole and vectors for
        # transfer_matrix
        for j in range(B.shape[0]):
            if skip_conjugate:
                skip_conjugate = False
                continue
            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
            # after QR Q=Q0|Q1
            # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
            # R when using mode "complete". In default mode Q1 and the zeros
            # in R are not computed
            # To debug with numpy qr uncomment the line below
            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
            Q, _ = s_qr(pole_space_j, mode="full")
            ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr returns sometimes vectors with zeros on the
            # same line for each pole and this yields very long convergence
            # times.
            # Or some other times a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts. After trying
            # many ways to select the best possible one (eg ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is a valid choice for transfer_matrix.
            # This way for complex poles we are sure to have a non zero
            # imaginary part that way, and the problem of lines full of zeros
            # in transfer_matrix is solved too as when a vector from
            # ker_pole_j has a zero the other one(s) when
            # ker_pole_j.shape[1]>1) for sure won't have a zero there.
            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
            transfer_matrix_j = (transfer_matrix_j /
                                 np.linalg.norm(transfer_matrix_j))
            if ~np.isreal(poles[j]):  # complex pole
                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
                                               np.imag(transfer_matrix_j)])
                ker_pole.extend([ker_pole_j, ker_pole_j])
                # Skip next pole as it is the conjugate
                skip_conjugate = True
            else:  # real pole, nothing to do
                ker_pole.append(ker_pole_j)
            if j == 0:
                transfer_matrix = transfer_matrix_j
            else:
                transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
        if rankB > 1:  # otherwise there is nothing we can optimize
            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
                                                  poles, B, maxiter, rtol)
            if not stop and rtol > 0:
                # if rtol<=0 the user has probably done that on purpose,
                # don't annoy him
                err_msg = (
                    "Convergence was not reached after maxiter iterations.\n"
                    "You asked for a relative tolerance of %f we got %f" %
                    (rtol, cur_rtol)
                    )
                warnings.warn(err_msg)
        # reconstruct transfer_matrix to match complex conjugate pairs,
        # ie transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
        transfer_matrix = transfer_matrix.astype(complex)
        idx = 0
        while idx < poles.shape[0]-1:
            if ~np.isreal(poles[idx]):
                rel = transfer_matrix[:, idx].copy()
                img = transfer_matrix[:, idx+1]
                # rel is a copy because without copy() it would reference a
                # column of transfer_matrix: it would change after the next
                # line and the line after would not yield the correct value
                transfer_matrix[:, idx] = rel-1j*img
                transfer_matrix[:, idx+1] = rel+1j*img
                idx += 1  # skip next one
            idx += 1
        try:
            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
                                                          transfer_matrix.T)).T
            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
        except np.linalg.LinAlgError as e:
            raise ValueError("The poles you've chosen can't be placed. "
                             "Check the controllability matrix and try "
                             "another set of poles") from e
    # Beware: Kautsky solves A+BK but the usual form is A-BK
    gain_matrix = -gain_matrix
    # K still contains complex with ~=0j imaginary parts, get rid of them
    gain_matrix = np.real(gain_matrix)
    full_state_feedback = Bunch()
    full_state_feedback.gain_matrix = gain_matrix
    full_state_feedback.computed_poles = _order_complex_poles(
        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
        )
    full_state_feedback.requested_poles = poles
    full_state_feedback.X = transfer_matrix
    full_state_feedback.rtol = cur_rtol
    full_state_feedback.nb_iter = nb_iter
    return full_state_feedback
def dlsim(system, u, t=None, x0=None):
    """
    Simulate output of a discrete-time linear system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system; the number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    u : array_like
        An input array describing the input at each time `t` (interpolation
        is assumed between given times). If there are multiple inputs, then
        each column of the rank-2 array represents an input.
    t : array_like, optional
        The time steps at which the input is defined. If `t` is given, it
        must be the same length as `u`, and the final value in `t`
        determines the number of steps returned in the output.
    x0 : array_like, optional
        The initial conditions on the state vector (zero by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : ndarray
        System response.
    xout : ndarray, optional
        Time-evolution of the state-vector. Only returned when the input
        system is a `StateSpace` instance.

    See Also
    --------
    lsim, dstep, dimpulse, cont2discrete

    Examples
    --------
    A simple integrator transfer function with a discrete time step of 1.0
    could be implemented as:

    >>> from scipy import signal
    >>> tf = ([1.0,], [1.0, -1.0], 1.0)
    >>> t_in = [0.0, 1.0, 2.0, 3.0]
    >>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
    >>> t_out, y = signal.dlsim(tf, u, t=t_in)
    >>> y.T
    array([[ 0., 0., 0., 1.]])
    """
    # Normalize the system description to a discrete-time dlti instance.
    if isinstance(system, lti):
        raise AttributeError('dlsim can only be used with discrete-time dlti '
                             'systems.')
    if not isinstance(system, dlti):
        system = dlti(*system[:-1], dt=system[-1])
    # Remember whether the caller passed state-space form: only then do we
    # return the state trajectory as a third value.
    return_states = isinstance(system, StateSpace)
    sys_ss = system._as_ss()
    u_arr = np.atleast_1d(u)
    if u_arr.ndim == 1:
        u_arr = np.atleast_2d(u_arr).T
    # Determine the simulation horizon and the number of output samples.
    if t is None:
        n_samples = len(u_arr)
        t_final = (n_samples - 1) * sys_ss.dt
    else:
        t_final = t[-1]
        n_samples = int(np.floor(t_final / sys_ss.dt)) + 1
    xout = np.zeros((n_samples, sys_ss.A.shape[0]))
    yout = np.zeros((n_samples, sys_ss.C.shape[0]))
    tout = np.linspace(0.0, t_final, num=n_samples)
    # Apply the initial condition (state starts at zero otherwise).
    if x0 is not None:
        xout[0, :] = np.asarray(x0)
    # Resample the input onto the system's own time grid when a time
    # vector was supplied; out-of-range times raise (bounds_error=True).
    if t is None:
        u_dt = u_arr
    else:
        interpolator = interp1d(t, u_arr.transpose(), copy=False,
                                bounds_error=True)
        u_dt = interpolator(tout).transpose()
    # March the state-space recursion x[k+1] = A x[k] + B u[k],
    # y[k] = C x[k] + D u[k].
    for k in range(n_samples - 1):
        yout[k, :] = (np.dot(sys_ss.C, xout[k, :]) +
                      np.dot(sys_ss.D, u_dt[k, :]))
        xout[k + 1, :] = (np.dot(sys_ss.A, xout[k, :]) +
                          np.dot(sys_ss.B, u_dt[k, :]))
    # The final output sample only needs the already-computed final state.
    yout[n_samples - 1, :] = (np.dot(sys_ss.C, xout[n_samples - 1, :]) +
                              np.dot(sys_ss.D, u_dt[n_samples - 1, :]))
    if return_states:
        return tout, yout, xout
    return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
    """
    Impulse response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system; the number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector. Defaults to zero.
    t : array_like, optional
        Time points. Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : tuple of ndarray
        Impulse response of the system, one array per input channel.

    See Also
    --------
    impulse, dstep, dlsim, cont2discrete

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> butter = signal.dlti(*signal.butter(3, 0.5))
    >>> t, y = signal.dimpulse(butter, n=25)
    >>> plt.step(t, np.squeeze(y))
    >>> plt.grid()
    >>> plt.xlabel('n [samples]')
    >>> plt.ylabel('Amplitude')
    """
    # Normalize the system description to a discrete state-space model.
    if isinstance(system, dlti):
        sys_ss = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dimpulse can only be used with discrete-time '
                             'dlti systems.')
    else:
        sys_ss = dlti(*system[:-1], dt=system[-1])._as_ss()
    # Default to 100 samples when neither t nor n is given.
    if n is None:
        n = 100
    if t is None:
        t = np.linspace(0, n * sys_ss.dt, n, endpoint=False)
    else:
        t = np.asarray(t)
    # Simulate one unit impulse per input channel and collect each output.
    responses = []
    for input_idx in range(sys_ss.inputs):
        u = np.zeros((t.shape[0], sys_ss.inputs))
        u[0, input_idx] = 1.0
        sim = dlsim(sys_ss, u, t=t, x0=x0)
        tout = sim[0]
        responses.append(sim[1])
    return tout, tuple(responses)
def dstep(system, x0=None, t=None, n=None):
    """
    Step response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system; the number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector. Defaults to zero.
    t : array_like, optional
        Time points. Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Output time points, as a 1-D array.
    yout : tuple of ndarray
        Step response of the system, one array per input channel.

    See Also
    --------
    step, dimpulse, dlsim, cont2discrete

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> butter = signal.dlti(*signal.butter(3, 0.5))
    >>> t, y = signal.dstep(butter, n=25)
    >>> plt.step(t, np.squeeze(y))
    >>> plt.grid()
    >>> plt.xlabel('n [samples]')
    >>> plt.ylabel('Amplitude')
    """
    # Normalize the system description to a discrete state-space model.
    if isinstance(system, dlti):
        sys_ss = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dstep can only be used with discrete-time dlti '
                             'systems.')
    else:
        sys_ss = dlti(*system[:-1], dt=system[-1])._as_ss()
    # Default to 100 samples when neither t nor n is given.
    if n is None:
        n = 100
    if t is None:
        t = np.linspace(0, n * sys_ss.dt, n, endpoint=False)
    else:
        t = np.asarray(t)
    # Simulate a unit step on each input channel and collect each output.
    responses = []
    for input_idx in range(sys_ss.inputs):
        u = np.zeros((t.shape[0], sys_ss.inputs))
        u[:, input_idx] = 1.0
        sim = dlsim(sys_ss, u, t=t, x0=x0)
        tout = sim[0]
        responses.append(sim[1])
    return tout, tuple(responses)
def dfreqresp(system, w=None, n=10000, whole=False):
    r"""
    Calculate the frequency response of a discrete-time system.

    Parameters
    ----------
    system : an instance of the `dlti` class or a tuple describing the system.
        1 element (a `dlti` instance), 2 (numerator, denominator, dt),
        3 (zeros, poles, gain, dt) or 4 (A, B, C, D, dt).
    w : array_like, optional
        Frequencies (in radians/sample) at which to evaluate the response.
        If not given, a reasonable set is computed.
    n : int, optional
        Number of frequency points to compute if `w` is not given.
    whole : bool, optional
        If False (default) computed frequencies cover 0..pi radians/sample;
        if True, they cover 0..2*pi.

    Returns
    -------
    w : 1D ndarray
        Frequency array [radians/sample]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients should be given
    in descending exponent order (``z^2 + 3z + 5`` -> ``[1, 3, 5]``).

    .. versionadded:: 0.18.0
    """
    # Reject continuous-time systems; build a dlti from a raw tuple.
    if not isinstance(system, dlti):
        if isinstance(system, lti):
            raise AttributeError('dfreqresp can only be used with '
                                 'discrete-time systems.')
        system = dlti(*system[:-1], dt=system[-1])

    if isinstance(system, StateSpace):
        # No direct SS->ZPK conversion exists; go via a transfer function.
        system = system._as_tf()

    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
        raise ValueError('Unknown system type')

    if system.inputs != 1 or system.outputs != 1:
        raise ValueError("dfreqresp requires a SISO (single input, single "
                         "output) system.")

    worN = n if w is None else w

    if isinstance(system, TransferFunction):
        # freqz expects polynomials in z^-1, while TransferFunction stores
        # polynomials in z; convert first.
        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
        w, h = freqz(num, den, worN=worN, whole=whole)
    elif isinstance(system, ZerosPolesGain):
        w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
                         whole=whole)

    return w, h
def dbode(system, w=None, n=100):
    r"""
    Calculate Bode magnitude and phase data of a discrete-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        1 element (a `dlti` instance), 2 (num, den, dt),
        3 (zeros, poles, gain, dt) or 4 (A, B, C, D, dt).
    w : array_like, optional
        Frequencies (in radians/sample) at which to compute the data.
        If not given, a reasonable set is computed.
    n : int, optional
        Number of frequency points to compute if `w` is not given.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/time_unit]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients should be given
    in descending exponent order (``z^2 + 3z + 5`` -> ``[1, 3, 5]``).

    .. versionadded:: 0.18.0
    """
    w, resp = dfreqresp(system, w=w, n=n)

    # The sampling period converts rad/sample into rad/time_unit below.
    if isinstance(system, dlti):
        dt = system.dt
    else:
        dt = system[-1]

    magnitude = 20.0 * numpy.log10(abs(resp))
    phase_deg = numpy.rad2deg(numpy.unwrap(numpy.angle(resp)))

    return w / dt, magnitude, phase_deg
| bsd-3-clause |
jwbuurlage/Zee | script/plot.py | 1 | 2256 | #!/usr/bin/python3
# plot.py reads descriptive .mtx, .plt, ... files and plots these using matplotlib
#
# FIXME: REQUIRES USETEX, PNGDVI, etc.
# TODO: zplot support
import argparse
import os
import yaml
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import numpy as np
from sparseplotter import *
#from matplotlib2tikz import save as tikz_save
from math import log, ceil
# Command-line interface: positional input files plus output/appearance flags.
# NOTE: parsing happens at import time, so importing this module requires a
# valid command line.
parser = argparse.ArgumentParser(description=
        "This script reads one or multiple .mtx file(s), and outputs"
        " a spy plot to screen or as a .pdf file")
parser.add_argument('--save', action='store_true',
        help="save to file, dont show on screen")
parser.add_argument('--showfile', action='store_true',
        help="save to file, show file on screen")
parser.add_argument('--filetype', type=str, default='pdf',
        help="filetype used for saving images")
# directory is concatenated to each filename below, hence the trailing '/'
parser.add_argument('--directory', type=str, default='',
        help="the directory in which the matrices are stored. Including"
        " the trailing /")
parser.add_argument('--figsize', type=int, default=3,
        help="size in inches of the figure")
parser.add_argument('--viewer', type=str, default='xdg-open',
        help="the viewer for the image")
parser.add_argument('files', type=str, nargs='+',
        help="The file(s) to use as input")
args = parser.parse_args()
###################################################
# PLOT YAML
#def plot(plot_file):
# f = open(plot_file, 'r')
#
# # FIXME: check if zee plot file or error
# contents = f.read()
# contents = contents.replace("\\", "\\\\")
#
# plot_data = yaml.load(contents)
#
# attributes = ["title", "xlabel", "ylabel", "yscale"]
# for attr in attributes:
# if attr in plot_data:
# getattr(plt, attr)(plot_data[attr])
#
# for line in plot_data["lines"]:
# line_data = plot_data["lines"][line]["data"]
# plt.plot(line_data)
#
# finalize_plt(plot_file)
# Dispatch each input file to the plotting routine matching its extension.
# `spy` and `plot` come from the star import of sparseplotter above.
handlers = {"mtx": spy, "yaml": plot}
for f in args.files:
    f = args.directory + f
    ext = f.split('.')[-1]
    handler = handlers.get(ext)
    if handler is not None:
        handler(f)
    else:
        print("can not plot file with extension " + ext)
| gpl-3.0 |
rubikloud/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
# Two independent, heavy-tailed sources: Student t with 1.5 degrees of
# freedom (strongly non-Gaussian), 20000 samples each.
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.

# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix

X = np.dot(S, A.T)  # Generate observations

# Recover components with both techniques for comparison.
pca = PCA()
S_pca_ = pca.fit(X).transform(X)

ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources

# Normalize the ICA estimate to unit standard deviation per component.
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter-plot 2D samples `S`; optionally overlay direction vectors.

    Each entry of `axis_list` is a pair of 2D directions, drawn as arrows
    (orange then red). NOTE: directions are normalized *in place*, so the
    caller's arrays are modified, matching the original behavior.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        for color, direction in zip(['orange', 'red'], axis_list):
            direction /= direction.std()
            dx, dy = direction
            # Short invisible stub so the arrow gets a legend entry.
            plt.plot(0.1 * dx, 0.1 * dy, linewidth=2, color=color)
            plt.quiver(0, 0, dx, dy, zorder=11, width=0.01, scale=6,
                       color=color)

    # Draw axes through the origin and fix the viewport.
    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.xlabel('x')
    plt.ylabel('y')
# 2x2 panel: true sources, mixed observations (with PCA/ICA directions),
# and the signals recovered by each method.
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')

# PCA components vs. the ICA mixing matrix columns, drawn as arrows.
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)

plt.title('Observations')

plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')

plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')

plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
maxvonhippel/q2-diversity | q2_diversity/tests/test_core_metrics.py | 1 | 1666 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import io
import unittest
import biom
import skbio
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from q2_diversity import core_metrics
class CoreMetricsTests(unittest.TestCase):
    """Tests for q2_diversity.core_metrics on a tiny two-feature table."""

    # Small rooted tree shared by both test cases.
    _NEWICK = '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'

    def _tree(self):
        """Return a fresh TreeNode parsed from the shared newick string."""
        return skbio.TreeNode.read(io.StringIO(self._NEWICK))

    def _table(self, counts):
        """Build a biom.Table with the fixed observation/sample ids."""
        return biom.Table(np.array(counts), ['O1', 'O2'], ['S1', 'S2', 'S3'])

    def test_core_metrics(self):
        # All samples have >= 13 total counts, so none is dropped.
        table = self._table([[0, 11, 11], [13, 11, 11]])
        results = core_metrics(table, self._tree(), 13)
        self.assertEqual(len(results), 12)

        expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 2},
                             name='observed_otus')
        pdt.assert_series_equal(results[1], expected)

    def test_core_metrics_rarefy_drops_sample(self):
        # S1 has only 12 counts, below the rarefaction depth of 13,
        # so it must be dropped from the results.
        table = self._table([[0, 11, 11], [12, 11, 11]])
        results = core_metrics(table, self._tree(), 13)
        self.assertEqual(len(results), 12)

        expected = pd.Series({'S2': 2, 'S3': 2},
                             name='observed_otus')
        pdt.assert_series_equal(results[1], expected)
| bsd-3-clause |
Chilipp/nc2map | _maps_old.py | 1 | 61976 | # -*- coding: utf-8 -*-
import glob
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from itertools import izip, chain, permutations, product
from collections import OrderedDict
from mapos import mapBase, fieldplot, windplot, returnbounds, round_to_05
from _ncos import ncos
from _mapsproperties import mapsproperties
import formatoptions
from formatoptions import fmtBase
from _cmap_ops import get_cmap, CbarManager
from evaluators import ViolinEval
# Shared property factory used by the maps class below.
_props = mapsproperties()

evaluatorsdict = { # dictionary mapping evaluator names to evaluator classes
    'violin': ViolinEval
}

# Module-level registry: the most recently created/selected maps instance
# and the list of all instances that have not been closed yet.
currentmap = None
openmaps = []
def gcm():
    """Return the current maps instance (the last one created or set via scm)."""
    return currentmap
def scm(mymaps):
    """Set `mymaps` as the current maps instance (subsequently returned by gcm)."""
    global currentmap
    currentmap = mymaps
def close():
    """Close every open maps instance and empty the module-level registry."""
    global openmaps
    for instance in openmaps:
        instance.close()
    openmaps = []
def update(fmt={}, add = True, delete = True, todefault = False, **kwargs):
    """Shortcut to the update method of the current maps instance (see gcm).

    NOTE(review): `fmt={}` is a mutable default; it is only forwarded here
    and maps.update deep-copies its argument, so the shared default is
    presumably never mutated -- verify if maps.update changes.
    """
    currentmap.update(fmt,add,delete,todefault, **kwargs)
class maps(object):
"""
Creates an object containing mapBase instances for given variables, times and
levels. For initialization keywords see initialization function __init__ below.
To change attributes, please use the update function.
Methods are:
- get_maps: returns a list of all mapBase instances contained in the maps instance
- get_figs: returns a dictionary with figs as keys and the corresponding mapBase
instance
- get_winds: if not windonly: Returns the windinstances of the fieldplot instances
- get_cbars: returns a list of cbars of the specified dimensions
- get_labels: returns names, times, etc. of the given instances
- output: Saves the specified figures to a file
- update: updates the maps from a dictionary or given keywords
- update_cbar: updates the cbars if any cbars are drawn to handle multiple mapBase
instances
- nextt: updates all maps to their next timestep
- previoust: updates all maps to the previous timestep
- reset: reinitializes the maps instance and makes a new plot (probably your best
friend especially after playing around with colorbars)
- show: shows all figures
- redo: redo the changes made
- undo: undo the changes made
- make_movie: makes a movie of the specified figures and saves it
- asdict: returns the current formatoptions of the maps instance as dictionary
- script: creates a file for initialization of the maps instance
- removecbars: removes the specified colorbars if any cbars are drawn to handle multiple
mapBase instances and allows the manipulation of the bounds of the
separate mapBase instances
- close: closes the maps instance and all corresponding mapBase instances and
figures
See below for a more detailed description of each method.
"""
# ------------------ define properties here -----------------------
# mapdata dictionary property
maps = _props.default('maps', """list containing mapBase instances from the initialization""")
evaluators = _props.default('evaluators', """List containing the evaluator instances of the maps instance""")
fname = _props.default('fname', """Name of the nc-file""")
names = _props.names('names', """List of tuples (name, var, time, level) for each mapBase instance""")
vlst = _props.vlst('vlst', """List of variables""")
times = _props.times('times', """List of time steps""")
levels = _props.levels('levels', """List of levels""")
subplots = _props.subplots('subplots', """List of subplots""")
nco = _props.nco('nco', """netCDF4.MFDataset instance of ncfile""")
    def __init__(self, ncfile, names=None, vlst = None, times = 0, levels = 0, ax = (1,1), sort = 'vtl', fmt = None, timenames = ['time'], levelnames = ['level', 'lvl', 'lev'], lon=['lon', 'longitude', 'x'], lat=['lat', 'latitude', 'y'], windonly=False, onecbar = False, u=None, v=None, figsize = None, plot=True):
        """
        Input:
          - ncfile: string or 1D-array of strings. Path to the netCDF-file containing the
            data for all variables. Filenames may contain wildcards (*, ?, etc.) as suitable
            with the Python glob module (the netCDF4.MFDataset is used to open the nc-file).
            You can even give the same netCDF file multiple times, e.g. for making one figure
            with plots for one variable, time and level but different regions.
          - names: string, tuple or list of those. This sets up the unique identifier for each
            mapBase instance. If None, they will be called like mapo0, mapo1, ... and variables
            will be chosen as defined by vlst, times and levels (in other words
            N=len(vlst)*len(times)*len(levels) mapBases will be created with the names
            mapo0, ..., mapoN. If names is not None, it can be either a list of tuples or strings:
            -- string: Names of the mapBase instances (or only one string if only one mapBase
               instance)
            -- tuple: (<name>, <var>, <time>, <level>), where <name> is the name of the mapBase
               instance (as string), <var> the variable name (as string), <time> the timestep
               (as integer) and <level> the level (as integer (will not be used if no level
               dimension found in netCDF file)). This tuple defines directly which variables at
               which timestep and level to plot and how to name the mapBase instance
          - vlst: string or 1D-array of strings. List containing all variables which
            shall be plotted or only one variable. The given strings names must correspond to
            the names in <ncfile>. If None, all variables which are not declared as dimensions
            are used. If windonly is True, this name will be used for the wind variable
          - u: string (Default: None). Name of the zonal wind variable if a windplot
            shall be visualized
          - v: string (Default: None). Name of the meridional wind variable if a windplot shall
            be visualized
          - times: integer or list of integers. Timesteps which shall be plotted
          - levels: integer or list of integers. Levels which shall be plotted
          - ax: matplotlib.axes.AxesSubplot instance or list of matplotlib.axes.AxesSubplot
            instances or tuple (x,y[,z]) (Default: (1,1)). If ax is an axes instance (e.g.
            plt.subplot()) or a list of axes instances, the data will be plotted into these axes.
            If ax is a tuple (x,y), figures will be created with x rows and y columns of subplots.
            If ax is (x,y,z), only the first z subplots of each figure will be used.
          - figsize: Tuple (x,y), (Default: None, i.e. (8,6)). Size of the figure (does not have
            an effect if ax is a subplot or a list of subplots).
          - windonly: Do not print an underlying field but only u and v on the maps.
          - sort: string, combination of 't', 'v' and 'l' (Default: 'vtl'). Gives the order of
            sorting the maps on the figures (i.e. sort = 'vtl' will first sort for variables, then
            time then level).
          - timenames: 1D-array of strings: Gives the name of the time-dimension for which will be
            searched in the netCDF file
          - levelnames: 1D-array of strings: Gives the name of the fourth dimension (e.g vertical levels)
            for which will be searched in the netCDF file
          - lon: 1D-array of strings: Gives the name of the longitude-dimension for which will be
            searched in the netCDF file
          - lat: 1D-array of strings: Gives the name of the latitude-dimension for which will be
            searched in the netCDF file
          - onecbar: boolean, dictionary or tuple of dictionaries (Default: False). If True, one
            colorbar will be drawn for each figure. If dictionary: the syntax is as follows:
            onecbar = {['vlst':...][, 'times':...][, 'levels':...][, 'formatoption keyword':...]}
            where [] indicate optional arguments. 'vlst', 'times' and 'levels' may be a list of the
            variables, times or levels respectively (If not set: Use all variables, times, etc. in the
            maps object) and 'formatoption keyword' may be any regular key word of the formatoptions
            controlling the colorbar ('cmap', 'bounds', 'clabel', 'plotcbar', etc.). To update those
            colorbars. Use the update_cbar function.
          - fmt: dictionary (Default: None). Dictionary controlling the format of the plots.
            Syntax is as follows:
            fmt = {['<<<var>>>':{
                     ['t<<<time>>>': {
                        ['l<<<level>>>': {'keyword': ..., ...}]
                        [, 'keyword': ...,...]
                        }]
                     [, 'l<<<level>>>: {'keyword': ..., ...}]
                     [, 'keyword': ..., ...]
                     }]
                   [, 't<<<time>>>': {
                      ['l<<<level>>>': {'keyword': ..., ...}]
                      [, 'keyword': ...,...]
                      }]
                   [, 'l<<<level>>>: {'keyword':..., ...}]
                   [, <<<name>>>: {'keyword':..., ...}]
                   [, 'keyword':..., ...]
                   }.
            Seems complicated, but in fact rather simple considering the following rules:
            -- Formatoptions are set via 'keyword':value (for possible keywords, see below).
            -- Time and level specific keywords are put into a dictionary indicated by the key
               't<<<time>>>' or 'l<<<level>>>' respectively (where <<<time>>> and <<<level>>>
               is the number of the time, and or level).
            -- To set default formatoptions for each map: set the keyword in the upper most hierarchical
               level of formatoptions (e.g. fmt = {'plotcbar':'r'}).
            -- To set default formatoptions for each variable, times or level separately set the keyword
               in the second hierarchical level of formatoptions (e.g. fmt = {'t4':{'plotcbar:'r'}}
               will only change the formatoptions of maps with time equal to 4,
               fmt = {'l4':{'plotcbar:'r'}} will only change formatoptions of maps with level
               equal to 4).
            -- To set default options for a specific variable and time, but all levels: put them in the 3rd
               hierarchical level of formatoptions (e.g. fmt = {<<<var>>>:{'t4':{'plotcbar':'r'}}}
               will only change the formatoptions of each level corresponding to variable <<<var>>> and
               time 4). Works the same for setting default options for specific variable and level, but all
               times.
            -- To set a specific key for one map, just set
               fmt = {<<<var>>>: {'t<<<time>>>': {'l<<<level>>>': {'plotcbar: 'r', ...}}}}
               or directly with the name of the mapBase instance (see names keyword)
               fmt = {<<<name>>>: {'plotcbar': 'r', ...}}
            .
            The formatoption keywords are:
        """
        # docstring is extended below
        # Register this instance as the current one in the module registry.
        global currentmap
        global openmaps
        currentmap = self
        openmaps = openmaps + [self]
        self.maps = []
        self.evaluators = []
        self._cbars = []
        self._ncos = []
        # Expand wildcards in the file name(s); a TypeError means ncfile is
        # not a string (e.g. already a list), so it is stored unchanged.
        try:
            self.fname = glob.glob(ncfile)
        except TypeError:
            self.fname = ncfile
        self.nco = self.fname
        self.lonnames = lon
        self.latnames = lat
        self.plot = plot
        self.timenames = timenames
        self.levelnames= levelnames
        self._dims = {'lon':lon, 'lat':lat, 'time':timenames, 'level':levelnames}
        # Default variable list: every netCDF variable that is not one of the
        # recognized dimension names.
        if vlst is None and not windonly:
            self.vlst = [str(key) for key in self.nco.variables.keys() if key not in lon+lat+timenames+levelnames]
        else:
            if isinstance(vlst, str): vlst = [vlst]
            self.vlst = vlst
        # Scalars are promoted to single-element lists.
        if isinstance(times, int): times = [times]
        if isinstance(levels, int): levels = [levels]
        self.levels = levels
        self.times = times
        self.figsize = figsize
        self.sort = sort
        self.u = u
        self.v = v
        self.windonly = windonly
        self.names = self._setupnames(names, self.vlst, self.times, self.levels, self.sort)
        self.subplots = (ax, len(self.names))
        self._setupfigs(self.names, self._setupfmt(fmt), self.subplots, self.nco)
        if plot:
            print("Setting up projections...")
            for mapo in self.get_maps(): mapo._setupproj()
            print("Making plots...")
            self.make_plot()
            # Title each figure window with the variables/times/levels it shows.
            for fig in self.get_figs():
                names, vlst, times, levels, long_names, units = self.get_labels(fig)
                fig.canvas.set_window_title('Figure ' + str(fig.number) + ': Variable ' + ','.join(var for var in vlst) + ', time ' + ', '.join(str(time) for time in times) + ', level ' + ', '.join(str(level) for level in levels))
            # Optionally draw shared colorbars (see class docstring for syntax).
            if onecbar is not False:
                if onecbar is True: self.update_cbar(*(dict(zip(['names', 'vlst','times','levels'], self.get_labels(fig)[:4])) for fig in self.get_figs()), add = False, delete = False)
                elif isinstance(onecbar, dict): self.update_cbar(onecbar, add=False, delete = False)
                else: self.update_cbar(*onecbar, add = False, delete = False)
        self._namesfrominit = [nametuple[0] for nametuple in self.names]
        # old fmts (for function undo)
        self._fmt = [self.asdict('maps','cbars')]
        # future fmts (for function redo)
        self._newfmt = []
def evaluate(self, evalname, *args, **kwargs):
"""Perform and evaluation on mapBase instances. kwargs depend on the chosen
evaluator. See method eval_doc for documentation of each evaluator.
Possible evaluators are
- """
# docstring is extended below
self.evaluators.append(evaluatorsdict[evalname](*args, mapsin=self, **kwargs))
return self.evaluators[-1]
def eval_doc(self, evalname):
"""Shows the documentation of the evaluator. Possible evaluator names are
- """
# docstring is extended below
help(evaluatorsdict[evalname])
def make_plot(self, *args, **kwargs):
"""makes the plot of mapBase instances. Don't use this function but rather
the update function to make plots"""
for mapo in self.get_maps(*args, **kwargs): mapo.make_plot()
def get_maps(self, names=None, vlst=None, times=None, levels=None):
"""Returns 1D-numpy array containing the mapBase instances stored in maps.
Input:
- names: string of 1D array of strings (Default: None). If not None,
but string or list of strings. Only the specified mapBase
instances are returned.
- vlst: string or 1D array of strings (Default: None). If not None,
the strings need to be the name of a variable contained in
the maps instance
- times: same as vlst but for times (as integers!)
- levels: same as vlst but for levels (as integers!)
Output:
- list of mapBase instances
"""
if names is None: names = np.unique([mapo.name for mapo in self.maps]).tolist()
elif isinstance(names, str): names = [names]
if vlst is None: vlst = np.unique([mapo.var for mapo in self.maps]).tolist()
elif isinstance(vlst, str): vlst = [vlst]
if times is None: times = np.unique([mapo.time for mapo in self.maps]).tolist()
elif isinstance(times, int): times = [times]
if levels is None: levels= np.unique([mapo.level for mapo in self.maps]).tolist()
elif isinstance(levels, int): levels = [levels]
return [mapo for mapo in self.maps if mapo.name in names and mapo.var in vlst and mapo.time in times and mapo.level in levels]
def get_winds(self, *args, **kwargs):
"""Returns 1D-numpy array containing the windplot instances stored in maps
if windonly is not True (in this case: use get_maps()).
Keyword arguments are determined by function get_maps (i.e. names, vlst, times and levels)."""
if not self.windonly: return [mapo.wind for mapo in self.get_maps(*args, **kwargs) if mapo.wind is not None]
else: return []
    def get_figs(self, *args, **kwargs):
        """Returns dictionary containing the figures used in the maps instance
        as keys and a list with the included mapBase instances as value.
        Without any kwargs and args, return all figures from the maps instance.
        Otherwise you can either give mapBase objects or figures as arguments
        or specify one or each of the following key words to return all figures
        related to the specified variable, time or level
        - names: string or list of strings. Specify names of mapBase instances
          to return
        - vlst: string or list of strings. Specify variables to return
        - times: integer or list of integers. Specify times to return
        - levels: integer or list of integers. Specify levels to return
        Example: self.get_figs(vlst='temperature') will return a dictionary with
        all figures that have mapBase instances with the variable 'temperature' as
        subplot as keys and a list with the corresponding mapBase instances as values.
        If 'wind' in args: the mapBase instances will be the corresponding wind-
        plot instances to the figure.
        """
        # 'wind' switches the lookup from field plots to wind plots.
        if 'wind' in args:
            get_func = self.get_winds
            args = tuple(arg for arg in args if arg != 'wind')
        else:
            get_func = self.get_maps
        # Three call styles: no args (use kwargs as selectors), mapBase
        # arguments, or figure arguments. `append` controls whether new
        # figure keys may be added in the loop below.
        if args == ():
            maps = get_func(**kwargs)
            figs = OrderedDict()
            append = True
        elif all(isinstance(arg, mapBase) for arg in args):
            maps = args
            figs = OrderedDict()
            append = True
        elif all(isinstance(arg, mpl.figure.Figure) for arg in args):
            # Figures given explicitly: only fill in their mapBase lists.
            figs = OrderedDict([(arg, []) for arg in args])
            maps = get_func()
            append = False
        else: raise TypeError("Wrong type of obj! Object must either be 'maps' or 'winds'!")
        # Group the selected mapBase instances under their parent figure.
        for mapo in maps:
            if mapo.ax.get_figure() not in figs and append: figs[mapo.ax.get_figure()] = []
            if mapo.ax.get_figure() in figs: figs[mapo.ax.get_figure()].append(mapo)
        return figs
def _replace(self, txt, fig, delimiter='-'):
"""Function to replace strings by objects from fig
Input:
- txt: string where <<<var>>>, <<<time>>>, <<<name>>>, <<<level>>>,
<<<longname>>>, <<<units>>> shall be replaced by the corresponding
attributes of the mapBase object plotted in fig.
- fig: figure or list of figures
- delimiter: string which shall be used for separating values, if more
than one mapBase instance is inside the figure
Returns:
- string without <<<var>>>, <<<time>>> and so on
"""
if isinstance(fig, mpl.figure.Figure):
fig = [fig]
values = self.get_labels(*fig)
values = [map(str, value) for value in values]
wildcards = ['<<<name>>>', '<<<var>>>', '<<<time>>>', '<<<level>>>', '<<<longname>>>', '<<<unit>>>']
for wildcard, value in izip(wildcards, values):
txt = txt.replace(wildcard, delimiter.join(value))
return txt
def output(self, output, *args, **kwargs):
"""Saves the figures.
Just setting output = 'filename.pdf' will save all figures of the maps object to filename.pdf
Further input options:
- output: string or 1D-array of strings. If string: <<<var>>>,
<<<time>>>, <<<level>>>, <<<long_name>>>, <<<unit>>> will be
replaced by the attributes contained in the figures.
Arguments:
- Either figures or mapBase instances which shall be saved (in
case of mapBase, the corresponding figure will be saved)
- 'tight' making the bbox_inches of the plot tight, i.e. reduce
the output to the plot margins
Keyword arguments:
- names: To save only the figures with the mapBase instances
specified in names
- vlst: To save only the figures with variables specified in vlst
- times: To save only the figures with times specified in times
- levels: To save only the figures with levels specified in levels
- any other keyword as specified in the pyplot.savefig function.
These are:
"""
# the docstring is extended by the plt.savefig docstring below
from matplotlib.backends.backend_pdf import PdfPages
saveops = {key:value for key, value in kwargs.items() if key not in ['names', 'vlst','times','level']}
if 'tight' in args: saveops['bbox_inches'] = 'tight'; args = tuple([arg for arg in args if arg != 'tight'])
kwargs = {key:value for key, value in kwargs.items() if key in ['names', 'vlst','times','level']}
if args == ():
figs = self.get_figs(**kwargs).keys()
elif isinstance(args[0], mapBase):
names, vlst, times, levels, long_names, units = self.get_labels(*args)
figs = self.get_figs(vlst=names, times=times, levels=levels)
else:
figs = args
if isinstance(output, str):
if output[-4:] in ['.pdf', '.PDF']:
output = self._replace(output, figs)
with PdfPages(output) as pdf:
for fig in figs: pdf.savefig(fig, **saveops)
print('Saving plot to ' + output)
return
else:
strout = output
output = []
for fig in figs:
names, vlst, times, levels, long_names, units = self.get_labels(fig)
output = self._replace(output, fig)
else: pass
# test output
try:
if len(np.shape(output)) > 1: raise ValueError('Output array must be a 1D-array!')
if len(figs) != len(output): raise ValueError('Length of output names (' + str(len(output)) + ') does not fit to the number of figures (' + str(len(figs)) + ').')
except TypeError:
raise TypeError('Output names must be either a string or an 1D-array of strings!')
for fig in figs:
fig.savefig(output[figs.index(fig)], **saveops)
print('Plot saved to ' + output[figs.index(fig)])
return
    def update(self, fmt={}, add = True, delete = True, todefault = False, **kwargs):
        """Function to update the mapBase objects.
        Input:
          - fmt: dictionary (the same shape and options linke in the initialization function
            __init__).
          - add: Boolean (Default: True). Adds the new formatoptions to old formatoptions
            allowing a undoing via the undo function
          - delete: Boolean (Default: True). Deletes the newer formatoptions if created by
            function undo for the redo function.
          - todefault: Boolean (Default: False). Sets all formatoptions which are not speci-
            fied by fmt or kwargs to default.
        Additional keyword arguments may be any valid formatoption keyword.
        """
        from copy import deepcopy
        fmt = deepcopy(fmt) # if not deepcopied, the update in the next line will use previous fmts given to the update function
        # Formatoption keywords given directly as kwargs are merged into fmt;
        # the four selector keywords are handled separately below.
        fmt.update({key:value for key, value in kwargs.items() if key not in ['names', 'vlst', 'times','levels']})
        fmt = self._setupfmt(fmt)
        maps = self.get_maps(**{key: value for key, value in kwargs.items() if key in ['names', 'vlst', 'times', 'levels']})
        # update maps
        for mapo in maps: mapo.update(todefault=todefault, **fmt[mapo.name])
        # update figure window title and draw
        for cbar in self.get_cbars(*maps): cbar._draw_colorbar()
        for fig in self.get_figs(*maps):
            plt.figure(fig.number)
            names, vlst, times, levels, long_names, units = self.get_labels(fig)
            fig.canvas.set_window_title('Figure ' + str(fig.number) + ': Variable ' + ','.join(var for var in vlst) + ', time ' + ', '.join(str(time) for time in times) + ', level ' + ', '.join(str(level) for level in levels))
            plt.draw() # if it is part of a cbar, it has already been drawn above
        # add to old fmts
        if add: self._fmt.append(self.asdict('maps','cbars'))
        # delete new fmts
        if delete: self._newfmt = []
        del fmt
def get_names(self, *args, **kwargs):
    """Return one (name, var, time, level) tuple per mapBase object.

    *args and **kwargs select the mapBase objects exactly like in the
    get_maps method."""
    return [(mapo.name, mapo.var, mapo.time, mapo.level)
            for mapo in self.get_maps(*args, **kwargs)]
def nextt(self, *args, **kwargs):
    """Advance the selected maps by one time step.

    args and kwargs select the maps like in get_maps. Pass 'wind' as an
    argument to update only the wind overlays."""
    do_plot = bool(self.plot)
    if 'wind' in args:
        selection = self.get_winds(*(a for a in args if a != 'wind'), **kwargs)
    else:
        selection = self.get_maps(*args, **kwargs)
    for mapo in selection:
        mapo.update(time=mapo.time + 1, plot=do_plot)
    if self.plot:
        for fig in self.get_figs(*selection):
            plt.figure(fig.number)
            plt.draw()
def prevt(self, *args, **kwargs):
    """Step the selected maps one time step backwards.

    args and kwargs select the maps like in get_maps. Pass 'wind' as an
    argument to update only the wind overlays."""
    do_plot = bool(self.plot)
    if 'wind' in args:
        selection = self.get_winds(*(a for a in args if a != 'wind'), **kwargs)
    else:
        selection = self.get_maps(*args, **kwargs)
    for mapo in selection:
        mapo.update(time=mapo.time - 1, plot=do_plot)
    if self.plot:
        for fig in self.get_figs(*selection):
            plt.figure(fig.number)
            plt.draw()
def reset(self, num=0, fromscratch = False, ax=None, sort=None, figsize=None):
"""Reinitializes the maps object with the specified settings.
Works even if undo function fails.
Input:
- num: Number of formatoptions (like undo function). 0 is cur-
rent, -1 the one before (often the last one working), etc.
- fromscratch: Boolean. If False, only figures will be closed
and recreated (if ax is not None) or the axes will be reset
if ax is None. If True the whole maps instance will be closed
and reopend by loading the data from the nc-file (use this
option if you accidently closed the maps instance or for example
if you accidently set the wrong variables, times or levels and
undo function failed.)
- ax, sort, figsize: Like in initialization function __init__:
Specify the subplot, sort and figsize setting (if None: the
current settings will be used and if ax is None, no new figures
will be created).
"""
if self._fmt == []: raise ValueError('Impossible option')
if num > 0 and num >= len(self._fmt)-1: raise ValueError('Too high number! Maximal number is ' + str(len(self._fmt)-1))
elif num < 0 and num < -len(self._fmt): raise ValueError('Too small number! Minimal number is ' + str(-len(self._fmt)+1))
if figsize is not None: self.figsize = figsize
# try to save ncos
# (each name tuple is extended by its map's open dataset handle so it can
# be reused when the figures are rebuilt below)
nametuples = self.names
enhancednametuples = [list(nametuple) for nametuple in self.names]
for nametuple in enhancednametuples:
nametuple.append(self.get_maps(*nametuple)[0].nco)
# reset cbars
self.removecbars()
self._cbars = []
# close the maps instance
if fromscratch:
if ax is None: ax = self._subplot_shape
if ax is None: ax = (1,1)
try:
self.close(vlst=self.vlst)
except AttributeError: print("Could not close the figures but anyway will draw new figures")
except KeyError: print("Could not close the figures but anyway will draw new figures")
if not hasattr(self, 'nco'): self.nco = None
elif ax is not None:
try:
self.close('figure')
except AttributeError: print("Could not close the figures but anyway will draw new figures")
except KeyError: print("Could not close the figures but anyway will draw new figures")
# set new subplots
if ax is not None:
del self.subplots
self.subplots = (ax, len(enhancednametuples))
else:
# NOTE(review): "ax.clear" is referenced but never called (missing
# parentheses), so the existing axes are in fact NOT cleared here --
# confirm whether "ax.clear()" was intended.
for ax in self.subplots: ax.clear
# set new figures
# change names sorting
if sort is not None:
self.sort = sort
nametuples = self._setupnames(nametuples, self.vlst, self.times, self.levels, self.sort)
# set up figures
for nametuple in enhancednametuples:
subplot = self.subplots[nametuples.index(tuple(nametuple[:-1]))]
self._setupfigs([tuple(nametuple[:-1])], fmt=self._fmt[num-1][0], subplots=[subplot], nco=nametuple[-1], fromscratch=fromscratch)
if self.plot:
print("Setting up projections...")
for mapo in self.get_maps(): mapo._setupproj()
print("Making plots...")
self.make_plot()
for fig in self.get_figs():
plt.figure(fig.number)
names, vlst, times, levels, long_names, units = self.get_labels(fig)
fig.canvas.set_window_title('Figure ' + str(fig.number) + ': Variable ' + ','.join(var for var in vlst) + ', time ' + ', '.join(str(time) for time in times) + ', level ' + ', '.join(str(level) for level in levels))
plt.draw()
if self._fmt[num-1][1] != []: self.update_cbar(*self._fmt[num-1][1], add = False, delete = False)
# shift to new fmt
# NOTE(review): list.__delslice__ was removed in Python 3; the portable
# equivalent is "del self._fmt[start:]" (see undo/redo).
if num != 0:
self._newfmt = self._fmt[num:] + self._newfmt
if num < 0: self._fmt.__delslice__(len(self._fmt)+num, len(self._fmt))
else: self._fmt.__delslice__(num,len(self._fmt))
def undo(self, num=-1):
    """Undo the changes made. num gives the number of changes to go back
    (-1 is the previous state, -2 the one before, etc.).

    Raises ValueError if there is no older state to restore or num is out
    of range. The undone states are moved onto the redo stack so they can
    be restored with the redo method."""
    # need at least one historic state besides the current one
    if self._fmt == [] or len(self._fmt) == 1: raise ValueError('Impossible option')
    if num > 0 and num >= len(self._fmt)-1: raise ValueError('Too high number! Maximal number is ' + str(len(self._fmt)-1))
    elif num < 0 and num < -len(self._fmt): raise ValueError('Too small number! Minimal number is ' + str(-len(self._fmt)+1))
    # if the target state had no colorbar managers, drop the current ones
    if self._fmt[num-1][1] == []: self.removecbars()
    # restore mapBase formatoptions (todefault resets anything not given)
    self.update(self._fmt[num-1][0], add=False, delete=False, todefault = True)
    if self._fmt[num-1][1] != []: self.update_cbar(*self._fmt[num-1][1], add=False, delete=False, todefault = True)
    # move the undone states over to the redo stack
    self._newfmt = self._fmt[num:] + self._newfmt
    # BUGFIX: list.__delslice__ was removed in Python 3; "del lst[i:j]" is
    # the portable equivalent and behaves identically on Python 2.
    if num <= 0: del self._fmt[len(self._fmt)+num:]
    else: del self._fmt[num:]
def redo(self, num=1):
    """Redo the changes previously undone. num gives the number of changes
    to reapply (1 is the next newer state, etc.).

    Raises ValueError if there is nothing to redo or num is out of range.
    The redone states are moved back onto the undo stack."""
    if self._newfmt == []: raise ValueError('Impossible option')
    if num > 0 and num > len(self._newfmt): raise ValueError('Too high number! Maximal number is ' + str(len(self._newfmt)))
    elif num < 0 and num < -len(self._newfmt): raise ValueError('Too small number! Minimal number is ' + str(-len(self._newfmt)-1))
    # if the target state had no colorbar managers, drop the current ones
    if self._newfmt[num-1][1] == []: self.removecbars()
    self.update(self._newfmt[num-1][0], add=False, delete=False, todefault = True)
    if self._newfmt[num-1][1] != []: self.update_cbar(*self._newfmt[num-1][1], add=False, delete=False, todefault = True)
    # move the redone states back onto the undo stack
    self._fmt = self._fmt + self._newfmt[:num]
    # BUGFIX: list.__delslice__ was removed in Python 3; "del lst[:n]" is
    # the portable equivalent and behaves identically on Python 2.
    if num > 0: del self._newfmt[:num]
    else: del self._newfmt[:len(self._newfmt)+num]
def show(self):
    """Display every open figure without blocking the interpreter."""
    plt.show(block=False)
def close(self,*args,**kwargs):
    """Without any args and kwargs, close all open figure from the maps object,
    delete all mapBase objects and close the netCDF4.MFDataset.
    Otherwise you can give the following arguments:
      - 'data': delete all mapBase instances and (without any additional keywords)
        close the netCDF4.MFDataset
      - 'figure': Close the figures specified by kwargs
    You can further specify, which mapBase instances to close. Possible keywords are
      - vlst: string or list of strings. Specify variables to close
      - times: integer or list of integers. Specify times to close
      - levels: integer or list of integers. Specify levels to close
    """
    if any(arg not in ['data','figure'] for arg in args):
        raise KeyError('Unknown argument ' + ', '.join(arg for arg in args if arg not in ['data','figure']) + ". Possibilities are 'data' and 'figure'.")
    # nothing left to close
    if self.maps == []: return
    # BUGFIX: was "args is ()" -- identity comparison with a tuple literal
    # only works through CPython's empty-tuple caching and is a
    # SyntaxWarning since Python 3.8; use equality (as L20582 already does).
    if 'data' in args or args == ():
        for mapo in self.get_maps(**kwargs):
            mapo.close('data')
    if 'figure' in args or args == ():
        for mapo in self.get_maps(**kwargs):
            mapo._removecbar(['sh','sv'])
            if isinstance(mapo, fieldplot) and mapo.wind is not None: mapo.wind._removecbar(['sh','sv'])
        for fig in self.get_figs(**kwargs).keys(): plt.close(fig)
        # close any extra figures that only hold a colorbar
        for cbar in self._cbars:
            for cbarpos in cbar.cbar:
                for fig in cbar.cbar[cbarpos]:
                    plt.close(fig)
    # a full close (no args) also forgets the mapBase instances
    if args == ():
        for mapo in self.get_maps(**kwargs):
            self.maps.remove(mapo)
    # release the dataset handle on an unfiltered data/full close
    if kwargs == {} and ('data' in args or args == ()): del self.nco
def update_cbar(self,*args, **kwargs):
"""Update or create a cbar.
Arguments are dictionaries
onecbar = {['vlst':...][, 'times':...][, 'levels':...][, 'formatoption keyword':...]}
where [] indicate optional arguments. 'vlst', 'times' and 'levels' may be a list of the
variables, times or levels respectively (If not set: Use all variables, times, etc. in the
maps object) and 'formatoption keyword' may be any regular key word of the formatoptions
controlling the colorbar ('cmap', 'bounds', 'clabel', 'plotcbar', etc.).
Keyword arguments (kwargs) may also be formatoption keywords or out of vlst, times and levels.
They will then be treated like a single dictionary (this is just to avoid nasty typing of :,
{}, etc.). Further keyword arguments may be
- add: Boolean (Default: True). Adds the new formatoptions to old formatoptions
allowing a undoing via the undo function
- delete: Boolean (Default: True). Deletes the newer formatoptions if created by
function undo for the redo function.
- todefault: Boolean (Default: False). Sets all formatoptions which are not speci-
fied by fmt or kwargs to default.
If no colorbar with any of the specified dimensions is found, a new colorbar manager object is
created. For security reasons: There is no possibility to add new dimensions or variables to an
existing colorbar. To do so, remove the colorbars with the removecbar function and make a new one with this
function.
"""
# extract the control keywords; everything that remains in kwargs is
# treated as one additional cbar options dictionary
add = kwargs.get('add', True)
delete = kwargs.get('delete', True)
todefault = kwargs.get('todefault', False)
kwargs = {key:value for key,value in kwargs.items() if key not in ['add','delete','todefault']}
if kwargs != {}: newops = list(args) + [kwargs]
else: newops = list(args)
# first set colorbars
for cbarops in newops:
# a 'windplot' entry redirects the whole options dict to the wind layer
# (note that cbarops is modified in place via pop/update here)
if 'windplot' in cbarops: args = tuple(['wind']); cbarops.update(cbarops.pop('windplot')); wind=True; get_func = self.get_winds
else: args = (); wind = False; get_func=self.get_maps
dims = {key:cbarops.get(key, None) for key in ['names', 'vlst','levels','times']}
# if no colorbars are set up to now and no specific var, time and level options are set, make colorbars for each figure
if all(value is None for key, value in dims.items()) and self._cbars == []:
figs = self.get_figs(*args)
for fig in figs:
self._cbars.append(CbarManager(maps=figs[fig], fig=[fig], cbar={}, fmt=fmtBase(**{key:value for key,value in cbarops.items() if key not in ['names', 'times','vlst','levels']}), mapsobj = self, wind=wind))
# now update colorbar objects or create them if they are not existent
cbars = self.get_cbars(*args,**dims)
if cbars == []:
self._cbars.append(CbarManager(maps=get_func(**dims),fig = self.get_figs(*args, **dims).keys(), cbar={}, fmt=fmtBase(**{key:value for key,value in cbarops.items() if key not in dims.keys()}), mapsobj = self, wind=wind))
cbars = [self._cbars[-1]]
# now draw and update colorbars
for cbar in cbars:
# delete colorbars
# (with todefault, build the delta of options that have to change to
# reach the requested/default state -- presumably mirrors mapBase.update;
# TODO confirm against fmtBase semantics)
if not todefault: cbarops = {key:value for key,value in cbarops.items() if key not in ['names', 'times', 'vlst', 'levels']}
else:
cbarops = {key:cbarops.get(key, value) for key, value in cbar.fmt._default.items() if (key not in cbarops and np.all(getattr(cbar.fmt, key) != cbar.fmt._default[key])) or (key in cbarops and np.all(cbarops[key] != getattr(cbar.fmt,key)))}
if 'plotcbar' in cbarops:
# normalize shortcut values (False/None -> no cbar, True -> bottom)
if cbarops['plotcbar'] in [False, None]: cbarops['plotcbar'] = ''
if cbarops['plotcbar'] == True: cbarops['plotcbar'] = 'b'
cbar._removecbar([cbarpos for cbarpos in cbar.fmt.plotcbar if cbarpos not in cbarops['plotcbar']])
cbar.fmt.update(**cbarops)
# recompute the shared bounds from the data of all maps under this cbar
if cbar.fmt.bounds[0] in ['rounded', 'sym', 'minmax', 'roundedsym'] and len(cbar.fmt.bounds) == 2: cbar._bounds = returnbounds(map(lambda x: (np.min(x), np.max(x)), (mapo.data for mapo in cbar.maps)), cbar.fmt.bounds)
elif cbar.fmt.bounds[0] in ['rounded', 'sym', 'minmax', 'roundedsym'] and len(cbar.fmt.bounds) == 3: cbar._bounds = returnbounds(np.ma.concatenate(tuple(mapo.data for mapo in cbar.maps)), cbar.fmt.bounds)
else: cbar._bounds = cbar.fmt.bounds
cbar._cmap = get_cmap(cbar.fmt.cmap, N=len(cbar._bounds)-1)
cbar._norm = mpl.colors.BoundaryNorm(cbar._bounds, cbar._cmap.N)
# push the shared bounds/cmap down to every member map and redraw it
for mapo in cbar.maps: mapo.fmt._enablebounds = False; mapo.fmt._bounds = cbar._bounds; mapo.fmt._cmap = cbar._cmap; mapo.make_plot()
cbar._draw_colorbar()
for fig in cbar.fig: plt.figure(fig.number); plt.draw()
# maintain the undo/redo history like in update()
if add: self._fmt.append(self.asdict('maps','cbars'))
if delete: self._newfmt = []
def get_cbars(self,*args,**kwargs):
"""Function to return the CbarManager related to the given input
Input:
- Arguments (args) may be instances of mapBase, figures or
CbarManagers
- Keyword arguments may be the one defined by get_maps
(names, vlst, times, levels).
Output:
list of CbarManager instances"""
# collect the mapBase objects that the given args/kwargs refer to, then
# return every stored CbarManager that manages at least one of them
maps = []
args = list(args)
cbars = [cbar for cbar in args if isinstance(cbar, CbarManager)]
if args == []: maps = self.get_maps(**kwargs)
elif args == ['wind']: maps = self.get_winds(**kwargs)
#elif all(isinstance(arg, mpl.figure.Figure) for arg in args if arg != 'wind'): maps = [mapo for fig, mapo in self.get_figs(*args).items()]
else:
# replace figure arguments by the maps they contain; any remaining
# non-CbarManager argument is assumed to be a mapBase instance
figs = self.get_figs(*(arg for arg in args if arg == 'wind'))
for fig in figs:
if fig in args: maps = maps + figs[fig]; args.remove(fig)
maps = maps + list(arg for arg in args if not isinstance(arg,CbarManager))
cbars = cbars + [cbar for cbar in self._cbars if any(mapo in cbar.maps for mapo in maps)]
return cbars
def removecbars(self, *args, **kwargs):
    """Remove CbarManager instances from the plot.

    args and kwargs are passed through to the get_cbars function, i.e.
    arguments may be mapBase instances, figures or CbarManagers, and
    keywords may be vlst, times and levels."""
    for cbar in self.get_cbars(*args, **kwargs):
        # hand bounds control back to the individual maps
        for mapo in cbar.maps:
            mapo.fmt._enablebounds = True
        cbar._removecbar()
        self._cbars.remove(cbar)
def get_labels(self,*args):
"""Function to return the descriptions of the specific input
Input:
- Arguments (args) may be instances of mapBase, figures or
CbarManagers and
Output: names, vlst, times, levels, long_names, units
- names: list of mapBase instance names of the input (without duplicates)
- vlst: list of variables contained in the input (without duplicates)
- times: list of times contained in the input (without duplicates)
- levels: list of levels contained in the input (without duplicates)
- long_names: list of long_names of the variables in names (without duplicates)
- units: list of units of the variables in names (without duplicates)
return names, times, levels, long_names and units of the given mapobjects or the mapobjects in the given figures without duplicates"""
# NOTE: called without arguments this returns None, not empty lists
if args == (): return None
else:
args = list(args)
maps = []
# expand figure and CbarManager arguments into their mapBase objects;
# whatever remains in args is assumed to be a mapBase instance already
figs = self.get_figs()
for fig in figs:
if fig in args: maps = maps + figs[fig]; args.remove(fig)
for cbar in self.get_cbars('wind') + self.get_cbars():
if cbar in args: maps = maps + cbar.maps; args.remove(cbar)
maps = maps + args
attrs = ['name', 'var','time','level', 'long_name', 'units']
tmpout = [[getattr(mapo, attr) for mapo in maps] for attr in attrs]
# de-duplicate each attribute list while preserving first-seen order
# (xrange: this module is Python 2 code)
out = [[] for count in xrange(len(tmpout))]
for iattr in xrange(len(tmpout)):
for attr in tmpout[iattr]:
if attr not in out[iattr]: out[iattr].append(attr)
return out
def _setupnames(self, names, vlst, times, levels, sort):
"""Normalize the user-given names into a list of unique
(name, var, time, level) tuples, optionally re-sorted.

names may be None (auto-generate 'mapo<i>' names for the full
vlst x times x levels product), a single 4-tuple, a list of 4-tuples,
or a flat list of name strings matching the product length.
sort may be a permutation of 'tvl' or an explicit list of names."""
# initialize names
newnames = []
nnames = len(vlst)*len(times)*len(levels)
# if names is none, initialize unique own names
if names is None:
if self.maps != []:
existingnames = [nametuple[0] for nametuple in self.names]
else:
existingnames = []
names = []
icounter = 0
i = 0
# pick the first nnames 'mapo<i>' labels not already in use
while i < nnames:
if 'mapo%i' % icounter not in existingnames:
names.append('mapo%i' % icounter)
i += 1
icounter += 1
else:
icounter += 1
#names = ['mapo%i' % i for i in xrange(nnames)]
# if tuple, means only one mapo
if isinstance(names, tuple):
if len(names) != 4:
raise ValueError("Either wrong length (has to be 4) of tuple or wrong type (must not be a tuple) of names!")
newnames = [names]
vlst = [names[1]]
times = [names[2]]
levels = [names[3]]
else:
if isinstance(names, str):
names = [names]
if isinstance(names[0], tuple):
# full (name, var, time, level) tuples given: derive vlst/times/levels
if any(len(name) != 4 for name in names):
raise ValueError("Wrong length of name tuples (has to be 4: (name, var, time, level))!")
newnames = names
vlst = np.unique([name[1] for name in names]).tolist()
times = np.unique([name[2] for name in names]).tolist()
levels = np.unique([name[3] for name in names]).tolist()
else:
# flat name strings: pair them with the vlst x times x levels product
if len(names) != len(vlst)*len(times)*len(levels):
raise ValueError("Names has the wrong length (%i)! Expected %i!" % (
len(names), len(vlst)*len(times)*len(levels)))
for name, vtl in izip(names, product(vlst, times, levels)):
newnames.append((name, vtl[0], vtl[1], vtl[2]))
# --- resort names ---
if sort is not None:
# a permutation of 't','v','l' sorts by the given dimension order
if tuple(sort) in permutations(['t','v','l']):
sortlist = list(sort)
sortlist[sortlist.index('v')] = [var for var in vlst]
sortlist[sortlist.index('t')] = [time for time in times]
sortlist[sortlist.index('l')] = [level for level in levels]
sortlist = list(product(*sortlist))
names = newnames[:]
newnames = []
for sortitem in sortlist:
var = sortitem[sort.index('v')]
time = sortitem[sort.index('t')]
level = sortitem[sort.index('l')]
for nametuple in names:
if nametuple[1:] == (var, time, level):
newnames.append(nametuple)
# if not in the style of 'tvl', we assume that the names of mapBases are used for sorting
else:
names = newnames[:]
newnames = []
for name in sort:
for nametuple in names:
if nametuple[0] == name:
newnames.append(nametuple)
return newnames
def _setupfigs(self, names, fmt, subplots, nco=None, fromscratch=True):
"""set up the figures and map objects of the maps object

names is a list of (name, var, time, level) tuples, fmt a per-name
formatoptions dict and subplots one axes per name. With fromscratch,
new fieldplot/windplot objects are created; otherwise the existing
ones are re-attached to the given axes and reset to fmt."""
windonly = self.windonly
u = self.u
v = self.v
# NOTE(review): the local name "mapo" is first bound to the plot CLASS
# (windplot/fieldplot) and, in the not-fromscratch branch below, rebound
# to an INSTANCE. Harmless per call since fromscratch is constant over
# the loop, but fragile if the branches were ever mixed.
if windonly: mapo = windplot
else: mapo = fieldplot
if len(subplots) != len(names): raise ValueError('Number of given axes (' + str(len(subplots)) + ') does not fit to number of mapBase instances (' + str(len(names)) + ')!')
# setup axes
isubplot = 0
for name, var, time, level in names:
if fromscratch:
self.maps.append(mapo(self.fname, name=name, var=str(var), time=time, level=level, ax=subplots[isubplot], fmt=fmt[name], nco=nco, timenames = self.timenames, levelnames = self.levelnames, lon=self.lonnames, lat=self.latnames, ax_shapes=self._subplot_shape, ax_num=self._subplot_nums[isubplot], mapsin = self, u = u, v = v))
else:
# reuse the existing mapBase: strip its colorbar/wind artists and
# rebind it to the new axes before resetting the formatoptions
mapo = self.get_maps(names=name, vlst=var, times=time, levels=level)[0]
if hasattr(mapo, 'cbar'): mapo._removecbar(); del mapo.cbar
if hasattr(mapo,'wind') and mapo.wind is not None: mapo.wind._removeplot()
mapo.ax = subplots[isubplot]
mapo._subplot_shape=self._subplot_shape
mapo._ax_num=self._subplot_nums[isubplot]
mapo.update(plot=False, todefault = True, **fmt[mapo.name])
isubplot+=1
return
def make_movie(self, output, fmt={}, onecbar = {}, steps = None, *args, **kwargs):
"""Function to create a movie with the current settings.
Input:
- output: string or 1D-array of strings. If string: <<<var>>>,
<<<time>>>, <<<level>>>, <<<long_name>>>, <<<unit>>> will be
replaced by the attributes contained in the figures. If 1D-
array: The length of the array must fit to the specified figures
- fmt: Dictionary (Default: {}). Formatoptions (same hierarchical
order as in the initialization function) where the values of the
formatoption keywords need to be 1D-arrays with the same length
as the number of steps of the movie (e.g. to modify the title of
variable 't2m' with three time steps:
fmt = {'t2m':{'title':['title1','title2','title3']}}).
- onecbar: Dictionary or list of dictionaries (Default: {}). Same
settings as for update_cbar function but (like fmt) with values
of formatoption keywords being 1D-arrays with same length as number
of steps
- steps: List of integers or None. If None, all timesteps in the
nc-file are used for the movie. Otherwise set the timesteps as a list
- Additional arguments (*args) and keyword arguments may be figures,
mapBase instances, vlst=[...], etc. as used in the get_figs func-
tion to specify the figures to make movies of.
- Furthermore any valid keyword of the FuncAnimation save function
can be set. Default value for writer is 'imagemagick', and extra_args
are ['-vcodec', 'libx264']. Please note, if filename is in the addi-
tional keywords, it will replace the output variable.
The additional keywords inherited from FuncAnimation.save function are
"""
# docstring will be extended below
# default options for kwargs if not 'vlst', 'times', etc.
defaults = {'dpi':None, 'fps':3, 'writer':'imagemagick', 'extra_args':['-vcodec', 'libx264']}
# options as set in kwargs
movieops = {key:value for key, value in kwargs.items() if key not in ['vlst','times','levels', 'wind']}
for key, value in defaults.items(): movieops.setdefault(key, value)
# delete options from kwargs
kwargs = {key:value for key, value in kwargs.items() if key in ['vlst','times','levels', 'wind']}
# reset output to 'filename' in movieops if given
if 'filename' in movieops: output = movieops.pop('filename')
fmt = self._setupfmt(fmt)
figs = self.get_figs(*args, **kwargs)
for fig in figs:
if isinstance(output, str):
names, vlst, times, levels, long_names, units = self.get_labels(fig)
out = self._replace(output, fig)
else:
# NOTE(review): "i" is not defined anywhere in this scope -- passing
# output as a list raises NameError here. Presumably the index of
# fig within figs was intended; confirm and fix.
out = output[i]
maps = figs[fig]
cbars = self.get_cbars(*maps)
if steps is None:
# default to all timesteps of the first map's variable
# (NOTE: _shape() is a private netCDF4 API)
for timename in self.timenames:
try:
steps = range(self.nco.variables[maps[0].var]._shape()[self.nco.variables[maps[0].var].dimensions.index(timename)])
break
except ValueError: pass
# save bound options
bounds = [getattr(mapo.fmt, 'bounds') for mapo in maps]
windbounds = [getattr(mapo.wind.fmt, 'bounds') for mapo in maps if hasattr(mapo, 'wind') and mapo.wind is not None]
# modify bounds
# (bounds are fixed over the whole movie so that the colorbar does not
# jump between frames: computed once from the extremes over all steps)
print("Calculate bounds")
# handle the mapobject coordinated by one single cbar
for cbar in cbars:
if isinstance(cbar.maps[0], fieldplot):
if cbar.fmt.bounds[0] in ['rounded', 'sym', 'minmax', 'roundedsym']:
cbar.fmt.bounds = returnbounds(map(lambda x: (np.min(x), np.max(x)), chain(*((data[1] for data in mapo._moviedata(steps, nowind=True)) for mapo in [mapo for mapo in maps if mapo in cbar.maps]))), cbar.fmt.bounds)
# now handle the rest of the mapobjects
for mapo in (mapo for mapo in maps if all(mapo not in cbar.maps for cbar in cbars)):
if isinstance(mapo.fmt.bounds, tuple) and isinstance(mapo.fmt.bounds[0], str):
if isinstance(mapo, fieldplot):
if mapo.fmt.bounds[0] in ['rounded', 'sym', 'minmax', 'roundedsym']:
mapo.fmt.bounds = returnbounds(map(lambda x: (np.min(x), np.max(x)), (data[1] for data in mapo._moviedata(steps, nowind=True))), mapo.fmt.bounds)
if isinstance(mapo, windplot) or (hasattr(mapo, 'wind') and mapo.wind is not None and mapo.wind._bounds is not None):
if isinstance(mapo, windplot): wind = mapo
else: wind = mapo.wind
if wind.fmt.bounds[0] in ['rounded', 'sym', 'minmax', 'roundedsym']: wind.fmt.bounds = returnbounds(map(lambda x: (np.min(x), np.max(x)), (np.power(data[1]*data[1]+data[2]*data[2], 0.5) for data in wind._moviedata(steps))), wind.fmt.bounds)
# izip has no __len__ method which is required by the animation function. Therefore we define a subclass and use it for the data generator
# (izip subclassing is Python 2 only)
class myizip(izip):
def __len__(self): return len(steps)
# data generator
if cbars != []: data_gen = myizip(myizip(*(mapo._moviedata(steps, **fmt[mapo.name]) for mapo in maps)), myizip(*(cbar._moviedata(steps, **onecbar) for cbar in cbars)))
else: data_gen = myizip(*(mapo._moviedata(steps, **fmt[mapo.name]) for mapo in maps))
# run function
if cbars != []: runmovie = lambda args: [mapo._runmovie(args[0][maps.index(mapo)]) for mapo in maps] + [cbar._runmovie(args[1][cbars.index(cbar)]) for cbar in cbars]
else: runmovie = lambda args: [mapo._runmovie(args[maps.index(mapo)]) for mapo in maps]
# movie initialization function
def init_func():
self.update({}, add = False, delete = False)
if self._cbars != []: self.update_cbar({}, add = False, delete = False)
print("Make movie")
ani = FuncAnimation(fig, runmovie, frames=data_gen, repeat=True, init_func=init_func)
if out == 'show': plt.show()
else: ani.save(out, **movieops)
if not out == 'show': print('Saved movie to ' + out)
# restore initial settings
self.update(self._fmt[-1][0], add=False, delete=False, todefault = True)
if self._fmt[-1][1] == []:
for cbar in self._cbars: cbar._removecbar()
else: self.update_cbar(*self._fmt[-1][1], add=False, delete=False, todefault = True)
def _setupfmt(self, oldfmt, names=None):
"""set up the fmt for the mapBase instances
if names is None: use self.names

Expands a hierarchical options dict (global keys, then per-level
"l<i>", per-time "t<i>" and per-variable sub-dicts, then per-name)
into one flat dict per mapBase name. More specific settings override
more general ones."""
# set up the dictionary for each variable
def removedims(fmt):
# strip dimension-selector keys so only real formatoptions remain
# (NOTE: the tuple-unpacking lambda below is Python 2-only syntax)
dims = list(chain(*map(lambda (name, var, t, l): (name, var, "t%i" % t, "l%i" % l), self.names + self.get_names())))
return {key: val for key, val in fmt.items() if key not in dims}
if names is None:
names = self.names[:]
if oldfmt is None:
return {name[0]: None for name in names}
fmt = {}
# precedence (lowest to highest): global < level < time < time+level
# < variable < variable+time < variable+time+level < explicit name
for name, var, time, level in names:
fmt[name] = removedims(oldfmt)
try:
fmt[name].update(removedims(oldfmt["l%i" % level]))
except KeyError:
pass
try:
fmt[name].update(removedims(oldfmt["t%i" % time]))
try:
fmt[name].update(removedims(oldfmt["t%i" % time]["l%i" % level]))
except KeyError:
pass
except KeyError:
pass
try:
fmt[name].update(removedims(oldfmt[var]))
try:
fmt[name].update(removedims(oldfmt[var]["t%i" % time]))
try:
fmt[name].update(removedims(oldfmt[var]["t%i" % time]["l%i" % level]))
except KeyError:
pass
except KeyError:
pass
except KeyError:
pass
fmt[name].update(removedims(oldfmt.get(name, {})))
return fmt
def script(self, output):
"""Function to create a script named output with the current formatoptions.
Experimental function! Please take care of bounds and colormaps in the output
script.

Writes a Python script to `output` plus a companion pickle file
(same path with .py replaced by .pkl) holding the formatoptions."""
import datetime as dt
import pickle
with open(output,'w') as f:
f.write("# -*- coding: utf-8 -*-\n# script for the generation of nc2map.maps object. Time created: """ + dt.datetime.now().strftime("%d/%m/%y %H:%M") + '\n' + "import nc2map\nimport pickle\nncfile = " + str(self.fname) + "\nnames = " + str(self.names) + "\ntimenames = " + str(self.timenames) + "\nlevelnames = " + str(self.levelnames) + "\nlon = " + str(self.lonnames) + "\nlat = " + str(self.latnames) + "\nsort = '" + str(self.sort) + "'\n")
# save dictionary to pickle object
# NOTE(review): the pickle file is opened in text mode 'w'; on Python 3
# pickle.dump requires binary mode 'wb' -- fine for Python 2 only.
with open(output.replace('.py', '.pkl'), 'w') as fmtf:
pickle.dump(self.asdict(), fmtf)
f.write("with open('%s') as f:\n fmt = pickle.load(f)\n" % output.replace('.py', '.pkl'))
openstring = "mymaps = nc2map.maps(ncfile=ncfile, names=names, fmt=fmt, lon=lon, lat=lat, timenames=timenames, levelnames=levelnames, sort=sort"
if self._cbars != []:
f.write("onecbar = " + str(self.asdict('cbars')) + "\n")
openstring = openstring + ", onecbar = onecbar"
if self._subplot_shape is not None:
f.write("ax = " + str(self._subplot_shape) + "\n")
openstring = openstring + ", ax=ax"
if self.figsize is not None:
f.write("figsize = " + str(self.figsize))
openstring = openstring + ", figsize=figsize"
f.write("\n" + openstring + ")")
def addmap(self, ncfile, names=None, vlst = None, times = 0, levels = 0, ax = (1,1), sort = 'vtl', fmt = None, onecbar = False, u=None, v=None):
"""add a mapBase instance to maps instance
Input:
- ncfile: Either string (or list of strings) or mapBase instance or list
of mapBase instances. If mapBase instance or list of mapBase instances,
all of the other keywords are obsolete. If string or list of strings:
Path to the netCDF-file containing the data for all variables.
Filenames may contain wildcards (*, ?, etc.) as suitable with the
Python glob module (the netCDF4.MFDataset is used to open the
nc-file). You can even give the same netCDF file multiple times, e.g.
for making one figure with plots for one variable, time and level but
different regions.
All the rest of keywords are the same as for the init function:
"""
# if single mapBase instance, just add it
if isinstance(ncfile, mapBase):
self.maps.append(ncfile)
return
# if many mapBase instances, just add them
if isinstance(ncfile[0], mapBase):
if any(not isinstance(mapo, mapBase) for mapo in ncfile):
raise ValueError("Found mixture of objects in Input. Please use only mapBase instances or strings!")
self.maps += np.ravel(ncfile).tolist()
return
# else, initialize them
# (glob expands wildcards for a string path; a list of paths raises
# TypeError and is stored as given)
try:
self.fname = glob.glob(ncfile)
except TypeError:
self.fname = ncfile
self.nco = self.fname
# without an explicit vlst, take every variable that is not a coordinate
if vlst is None and not self.windonly:
vlst = [
str(key) for key in self.nco.variables.keys() if key not in self.lonnames+self.latnames+self.timenames+self.levelnames]
else:
if isinstance(vlst, str): vlst = [vlst]
if isinstance(times, int): times = [times]
if isinstance(levels, int): levels = [levels]
nametuples = self._setupnames(names=names, vlst=vlst, times=times, levels=levels, sort=sort)
names = [nametuple[0] for nametuple in nametuples]
nsub0 = len(self.subplots)
self.subplots = (ax, len(names))
# NOTE(review): this appends the LIST of tuples as a single element of
# self.names; if self.names is a flat list of 4-tuples elsewhere,
# "extend" may have been intended -- confirm.
self.names.append(nametuples)
self._setupfigs(nametuples, self._setupfmt(fmt, nametuples), self.subplots[nsub0:], self.nco)
print("Setting up projections...")
for mapo in self.get_maps(names=names): mapo._setupproj()
print("Making plots...")
self.make_plot(names=names)
if onecbar is not False:
if onecbar is True: self.update_cbar(*(dict(zip(['names', 'vlst','times','levels'], self.get_labels(fig)[:4])) for fig in self.get_figs()), add = False, delete = False)
elif isinstance(onecbar, dict): self.update_cbar(onecbar, add=False, delete = False)
else: self.update_cbar(*onecbar, add = False, delete = False)
for fig in self.get_figs(names=names):
names, vlst, times, levels, long_names, units = self.get_labels(fig)
fig.canvas.set_window_title('Figure ' + str(fig.number) + ': Variable ' + ','.join(var for var in vlst) + ', time ' + ', '.join(str(time) for time in times) + ', level ' + ', '.join(str(level) for level in levels))
# reset old fmts (for function undo)
self._fmt = [self.asdict('maps','cbars')]
# reset future fmts (for function redo)
self._newfmt = []
def asdict(self, *args, **kwargs):
"""returns the current formatoptions of all mapBase objects and
cbarmangers as dictionary.
Arguments may be
- 'maps' to return only the dictionary controlling the mapBase
instances (see formatoptions in the initialization) (Default)
- 'frominit' to return only the dictionary of the mapBase instances
which where created during intialization
- 'cbars' to return only the dictionary controlling the
CbarManager instances (see onecbar in the initialization)
Keyword argument may be any of get_maps, i.e.
- names
- vlst
- times
- levels
If both 'maps'/'frominit' and 'cbars' are requested, a
(fmt, cbars) tuple is returned; otherwise only the requested part.
"""
# not used keyword:
#- reduced: Boolean (Default: True). Reduces the formatoptions
# such that if formatoption keywords are multiply set for more
# than one instances (e.g. for all variables), they will be
# put together. As an example:
# {<<<var1>>>:{<<<t1>>>>:{<<<l1>>>:{<<<keyword>>>:<<<value>>>}}},
# <<<var2>>>:{<<<t2>>>>:{<<<l2>>>:{<<<keyword>>>:<<<value>>>}}}}
# will be reduced to {<<<keyword>>>:<<<value>>>} (as it is suitable
# for update and initialization function but shorter).
#- initcompatible: Boolean (Default: False). Returns the diction-
# ary in such a way that the maps object can be reiniatlized
# (i.e. with the original variables, times and levels as keys).
fmt = {}
cbars = []
if args == () or 'maps' in args or 'frominit' in args:
returnfmt = True
if 'frominit' in args:
kwargs['names'] = self._namesfrominit
for mapo in self.get_maps(**kwargs):
fmt[mapo.name] = mapo.asdict()
else: returnfmt = False
if 'cbars' in args:
returncbars = True
for cbar in self.get_cbars(**kwargs):
# store the cbar options; only record the name tuples if the cbar
# does not simply span all maps
names, vlst, times, levels, long_names, units = self.get_labels(*cbar.maps)
nametuples = self.get_names(names=names)
cbars.append({key:value for key,value in cbar.fmt.asdict().items()})
if nametuples != self.names:
cbars[-1]['names'] = nametuples
else: returncbars = False
if returnfmt and returncbars: return (fmt, cbars)
if returnfmt: return (fmt)
if returncbars: return (cbars)
def get_fmtkeys(self, *args):
    """Return a dictionary mapping every possible formatoption keyword
    to its documentation string.

    Arguments (*args) may be any keyword of the formatoptions plus
    'wind' (to include the 'wind' formatoption keywords) and 'windonly'
    (to include only the wind specific formatoption keywords, i.e. not
    projection keywords, etc.).
    """
    # dispatch to a wind instance when wind keys are requested,
    # otherwise to an ordinary map instance
    if 'wind' in args:
        return self.get_winds()[0].get_fmtkeys(*args)
    return self.get_maps()[0].get_fmtkeys(*args)
def show_fmtkeys(self, *args):
    """Print the formatoption keywords and their documentation in a
    readable manner.

    Arguments (*args) may be any keyword of the formatoptions
    (none: print all), plus 'wind' (to include the 'wind' formatoption
    keywords) and 'windonly' (to include only the wind specific
    keywords, i.e. not projection keywords, etc.).
    """
    # pick the instance type matching the requested keyword set
    source = self.get_winds() if 'wind' in args else self.get_maps()
    source[0].show_fmtkeys(*args)
# ------------------ modify docstrings here --------------------------
# Append the full table of formatoption keywords (plus the windplot
# specific ones) to the __init__ docstring at class-creation time.
__init__.__doc__ += "\n%s\n\nAnd the windplot specific options are\n\n%s" % (
    # formatoptions keywords
    '\n'.join((key+':').ljust(20) + val
              for key, val in sorted(formatoptions.get_fmtkeys().items())),
    # wind options keywords
    '\n'.join((key+':').ljust(20) + val
              for key, val in sorted(formatoptions.get_fmtkeys('wind', 'windonly').items())))
# addmap shares the argument description of __init__ from '- names' onwards
addmap.__doc__ += __init__.__doc__[__init__.__doc__.find('- names'):]
# forward keyword documentation of the underlying matplotlib routines
output.__doc__ = output.__doc__ + plt.savefig.__doc__[plt.savefig.__doc__.find('Keyword arguments:') + len('Keyword arguments:\n'):]
make_movie.__doc__ = make_movie.__doc__ + ' ' + FuncAnimation.save.__doc__[FuncAnimation.save.__doc__.find('*'):]
# list the available evaluator names in the evaluation docstrings
evaluate.__doc__ += '\n - '.join(evaluatorsdict.keys())
eval_doc.__doc__ += '\n - '.join(evaluatorsdict.keys())
# ------------------ modify docstrings on modular level here --------------------------
update.__doc__ = update.__doc__ + '\n' + maps.update.__doc__
| gpl-2.0 |
NunoEdgarGub1/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)

# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
                  n_features=2,
                  centers=4,
                  cluster_std=1,
                  center_box=(-10.0, 10.0),
                  shuffle=True,
                  random_state=1)  # For reproducibility

# Candidate cluster counts evaluated by the silhouette analysis below.
range_n_clusters = [2, 3, 4, 5, 6]
# For each candidate cluster count, fit KMeans and draw the silhouette
# plot (left) next to the clustered data (right).
for n_clusters in range_n_clusters:
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)

    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])

    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)

    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)

    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        # BUG FIX: cm.spectral was deprecated in matplotlib 2.0 and removed
        # in 2.2; cm.nipy_spectral is the drop-in replacement.
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples

    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")

    # The vertical line for the average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")

    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])

    # 2nd Plot showing the actual clusters formed
    # (same matplotlib fix as above: spectral -> nipy_spectral)
    colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)

    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)

    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)

    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")

    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')

plt.show()
| bsd-3-clause |
fabioticconi/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 127 | 1732 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)

# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause

import numpy as np
from matplotlib import pyplot as plt

from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score

# Build a 300x300 matrix with 5 implanted biclusters of large values on a
# noisy background of small values; 'rows'/'columns' are the ground truth.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows and columns to hide the bicluster structure.
# NOTE(review): sg._shuffle is a private scikit-learn helper and may
# change across versions -- verify against the installed release.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
# Compare the recovered biclusters against the (shuffled) ground truth;
# 1.0 means a perfect match.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

print("consensus score: {:.3f}".format(score))

# Reorder rows and columns by cluster label so biclusters become contiguous.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/tests/arrays/integer/test_comparison.py | 9 | 4005 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base import BaseOpsUtil
class TestComparisonOps(BaseOpsUtil):
    """Comparison-operator tests for pandas nullable-integer arrays.

    Several tests rely on pytest fixtures provided by the surrounding
    test suite ('data', 'all_compare_operators', 'any_nullable_int_dtype')
    and on the BaseOpsUtil helper 'get_op_from_name'.
    """

    def _compare_other(self, data, op_name, other):
        # Shared helper: check op(array, other) and op(Series, other)
        # against the comparison of the raw ndarray, with NA propagated
        # at the masked positions.
        op = self.get_op_from_name(op_name)

        # array
        result = pd.Series(op(data, other))
        expected = pd.Series(op(data._data, other), dtype="boolean")

        # fill the nan locations
        expected[data._mask] = pd.NA

        tm.assert_series_equal(result, expected)

        # series
        s = pd.Series(data)
        result = op(s, other)

        expected = op(pd.Series(data._data), other)

        # fill the nan locations
        expected[data._mask] = pd.NA
        expected = expected.astype("boolean")

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
    def test_scalar(self, other, all_compare_operators):
        # Comparing against a scalar: NA scalar yields an all-NA boolean
        # array, otherwise the mask of the operand is preserved.
        op = self.get_op_from_name(all_compare_operators)
        a = pd.array([1, 0, None], dtype="Int64")

        result = op(a, other)

        if other is pd.NA:
            expected = pd.array([None, None, None], dtype="boolean")
        else:
            values = op(a._data, other)
            expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
        tm.assert_extension_array_equal(result, expected)

        # ensure we haven't mutated anything inplace
        result[0] = pd.NA
        tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))

    def test_array(self, all_compare_operators):
        # Array-vs-array comparison: masks are OR-ed together.
        op = self.get_op_from_name(all_compare_operators)
        a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
        b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")

        result = op(a, b)
        values = op(a._data, b._data)
        mask = a._mask | b._mask

        expected = pd.arrays.BooleanArray(values, mask)
        tm.assert_extension_array_equal(result, expected)

        # ensure we haven't mutated anything inplace
        result[0] = pd.NA
        tm.assert_extension_array_equal(
            a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
        )
        tm.assert_extension_array_equal(
            b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
        )

    def test_compare_with_booleanarray(self, all_compare_operators):
        # Comparing a BooleanArray with an Int64 array must behave like
        # comparing it with the equivalent BooleanArray.
        op = self.get_op_from_name(all_compare_operators)
        a = pd.array([True, False, None] * 3, dtype="boolean")
        b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
        other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
        expected = op(a, other)
        result = op(a, b)
        tm.assert_extension_array_equal(result, expected)

    def test_no_shared_mask(self, data):
        # Arithmetic results must not share their mask buffer with the input.
        result = data + 1
        assert np.shares_memory(result._mask, data._mask) is False

    def test_compare_to_string(self, any_nullable_int_dtype):
        # GH 28930
        s = pd.Series([1, None], dtype=any_nullable_int_dtype)
        result = s == "a"
        expected = pd.Series([False, pd.NA], dtype="boolean")
        self.assert_series_equal(result, expected)

    def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
        # GH 28930: nullable-int comparison matches the float comparison
        # with NA restored at the missing positions.
        s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
        s2 = pd.Series([1, None, 3], dtype="float")

        method = getattr(s1, all_compare_operators)
        result = method(2)

        method = getattr(s2, all_compare_operators)
        expected = method(2).astype("boolean")
        expected[s2.isna()] = pd.NA

        self.assert_series_equal(result, expected)
def test_equals():
    # GH-30652
    # equals() is generally covered in /tests/extension/base/methods; this
    # specifically checks that two arrays of the same masked-integer class
    # but *different dtypes* never evaluate equal.
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int32")
    assert left.equals(right) is False
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    """Smoke-test DictVectorizer over the sparse/dtype/sort/iterable grid."""
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)

                    # 3 samples, 5 distinct features, values summing to 14
                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)

                    # transform after fit must reproduce fit_transform
                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))

                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    # Two feature dicts where only 'useful1' and 'useful2' separate the
    # classes under chi2; the other 20 features are constant and useless.
    noise = {"useless%d" % i: 10 for i in range(20)}
    d1 = dict(noise, useful1=1, useful2=20)
    d2 = dict(noise, useful1=20, useful2=1)

    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])

        # restrict() must keep exactly the two informative features
        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    """String feature values trigger one-of-K (one-hot) encoding."""
    # "version" expands to "version=1", "version=2", ...; the literal
    # key "version=3" maps straight onto the same feature namespace.
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))

    D_out = v.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})

    names = v.get_feature_names()
    # the expanded name is present, the raw key is not
    assert_true("version=2" in names)
    assert_false("version" in names)
def test_unseen_or_no_features():
    """Unseen features are ignored at transform time; an empty sample
    maps to an all-zero row; an empty *sequence* must raise ValueError."""
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)

        # a sample made only of unseen features -> all-zero row
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # an empty sample -> all-zero row
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
        else:
            # BUG FIX: previously the test passed silently when
            # transform([]) raised nothing at all.
            raise AssertionError("v.transform([]) should raise ValueError")
def test_deterministic_vocabulary():
    # Two dicts with identical items but different insertion order (and
    # hence different memory layout) must produce the same vocabulary.
    pairs = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    ordered = dict(pairs)
    rng.shuffle(pairs)
    scrambled = dict(pairs)

    v_1 = DictVectorizer().fit([ordered])
    v_2 = DictVectorizer().fit([scrambled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
INM-6/hybridLFPy | examples/example_brunel_alpha_topo_exp.py | 1 | 24733 | #!/usr/bin/env python
'''
Hybrid LFP scheme example script, applying the methodology with a model
implementation similar to:
Nicolas Brunel. "Dynamics of Sparsely Connected Networks of Excitatory and
Inhibitory Spiking Neurons". J Comput Neurosci, May 2000, Volume 8,
Issue 3, pp 183-208
But the network is implemented with spatial connectivity, i.e., the neurons
are assigned positions and distance-dependent connectivity in terms of
cell-cell connectivity and transmission delays.
Synopsis of the main simulation procedure:
1. Loading of parameterset
a. network parameters
b. parameters for hybrid scheme
2. Set up file destinations for different simulation output
3. network simulation
a. execute network simulation using NEST (www.nest-initiative.org)
b. merge network output (spikes, currents, voltages)
4. Create a object-representation that uses sqlite3 of all the spiking output
5. Iterate over post-synaptic populations:
a. Create Population object with appropriate parameters for
each specific population
b. Run all computations for populations
c. Postprocess simulation output of all cells in population
6. Postprocess all cell- and population-specific output data
7. Create a tarball for all non-redundant simulation output
The full simulation can be evoked by issuing a mpirun call, such as
mpirun -np 4 python example_brunel_alpha_topo_exp.py
Not recommended, but running it serially should also work, e.g., calling
python example_brunel_alpha_topo_exp.py
Given the size of the network and demands for the multi-compartment LFP-
predictions using the present scheme, running the model on nothing but a large-
scale compute facility is strongly discouraged.
'''
import brunel_alpha_nest_topo_exp as BN
import os
import numpy as np
if 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from time import time
import nest # not used here, but for running network sim in parallel with MPI
from hybridLFPy import PostProcess, setup_file_dest, CachedTopoNetwork, \
TopoPopulation
# from meso_analysis import CachedTopoNetwork, TopoPopulation
from parameters import ParameterSet
import h5py
import lfpykit
import neuron
from mpi4py import MPI
# matplotlib settings ###########################################
plt.close('all')
plt.rcParams.update({'figure.figsize': [10.0, 8.0]})
# set some seed values
SEED = 12345678
SIMULATIONSEED = 12345678
np.random.seed(SEED)
# Initialization of MPI stuff ############################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# if True, execute full model. If False, do only the plotting.
# Simulation results must exist.
PROPERRUN = True
# check if mod file for synapse model specified in alphaisyn.mod is loaded
if not hasattr(neuron.h, 'AlphaISyn'):
if RANK == 0:
os.system('nrnivmodl')
COMM.Barrier()
neuron.load_mechanisms('.')
##########################################################################
# PARAMETERS
##########################################################################
# Set up parameters using the NeuroTools.parameters.ParameterSet class.
# Access parameters defined in example script implementing the network using
# pynest, brunel_alpha_nest_topo.py, adapted from the NEST v2.4.1 release.
# This will not execute the network model, but see below.
# set up file destinations differentiating between certain output
PS = ParameterSet(dict(
# Main folder of simulation output
savefolder='simulation_output_example_brunel_topo_exp',
# make a local copy of main files used in simulations
sim_scripts_path=os.path.join('simulation_output_example_brunel_topo_exp',
'sim_scripts'),
# destination of single-cell output during simulation
cells_subfolder='cells',
cells_path=os.path.join(
'simulation_output_example_brunel_topo_exp',
'cells'),
# destination of cell- and population-specific signals, i.e.,
# compund LFPs, CSDs etc.
populations_subfolder='populations',
populations_path=os.path.join('simulation_output_example_brunel_topo_exp',
'populations'),
# location of spike output from the network model
spike_output_path=BN.spike_output_path,
# destination of figure file output generated during model execution
figures_subfolder='figures',
figures_path=os.path.join('simulation_output_example_brunel_topo_exp',
'figures'),
))
# population (and cell type) specific parameters
PS.update(dict(
# no cell type specificity within each E-I population
# hence X == x and Y == X
X=["EX", "IN"],
# population-specific LFPy.Cell parameters
cellParams=dict(
# excitory cells
EX=dict(
morphology='morphologies/ex.swc',
v_init=BN.neuron_params['E_L'],
cm=1.0,
Ra=150,
passive=True,
passive_parameters=dict(g_pas=1. / (BN.neuron_params['tau_m']
* 1E3), # assume cm=1
e_pas=BN.neuron_params['E_L']),
nsegs_method='lambda_f',
lambda_f=100,
dt=BN.dt,
tstart=0,
tstop=BN.simtime,
verbose=False,
),
# inhibitory cells
IN=dict(
morphology='morphologies/in.swc',
v_init=BN.neuron_params['E_L'],
cm=1.0,
Ra=150,
passive=True,
passive_parameters=dict(g_pas=1. / (BN.neuron_params['tau_m']
* 1E3),
e_pas=BN.neuron_params['E_L']),
nsegs_method='lambda_f',
lambda_f=100,
dt=BN.dt,
tstart=0,
tstop=BN.simtime,
verbose=False,
)),
# assuming excitatory cells are pyramidal
rand_rot_axis=dict(
EX=['z'],
IN=['x', 'y', 'z'],
),
# kwargs passed to LFPy.Cell.simulate()
simulationParams=dict(),
# set up parameters corresponding to model populations, the x-y coordinates
# will use position-data from network simulation, but sliced if more than
# one cell type y is assigned to represent a main population Y
populationParams=dict(
EX=dict(
number=BN.NE,
position_index_in_Y=['EX', 0],
z_min=-450,
z_max=-350,
),
IN=dict(
number=BN.NI,
position_index_in_Y=['IN', 0],
z_min=-450,
z_max=-350,
),
),
# set the boundaries between the "upper" and "lower" layer
layerBoundaries=[[0., -300],
[-300, -500]],
# set the geometry of the virtual recording device
electrodeParams=dict(
# contact locations:
x=np.meshgrid(np.linspace(-1800, 1800, 10),
np.linspace(-1800, 1800, 10))[0].flatten(),
y=np.meshgrid(np.linspace(-1800, 1800, 10),
np.linspace(-1800, 1800, 10))[1].flatten(),
z=[-400. for x in range(100)],
# extracellular conductivity:
sigma=0.3,
# contact surface normals, radius, n-point averaging
N=[[0, 0, 1]] * 100,
r=5,
n=20,
seedvalue=None,
# dendrite line sources, soma as sphere source (Linden2014)
method='root_as_point',
),
# runtime, cell-specific attributes and output that will be stored
savelist=[
],
# time resolution of saved signals
dt_output=BN.dt * 2
))
# for each population, define layer- and population-specific connectivity
# parameters
PS.update(dict(
# number of connections from each presynaptic population onto each
# layer per postsynaptic population, preserving overall indegree
k_yXL=dict(
EX=[[int(0.5 * BN.CE), 0],
[int(0.5 * BN.CE), BN.CI]],
IN=[[0, 0],
[BN.CE, BN.CI]],
),
# set up table of synapse PSCs from each possible connection
J_yX=dict(
EX=[BN.J_ex * 1E-3, BN.J_in * 1E-3],
IN=[BN.J_ex * 1E-3, BN.J_in * 1E-3],
),
# set up synapse parameters as derived from the network
synParams=dict(
EX=dict(
section=['apic', 'dend'],
tau=BN.tauSyn,
syntype='AlphaISyn'
),
IN=dict(
section=['dend'],
tau=BN.tauSyn,
syntype='AlphaISyn'
),
),
# set up delays, here using fixed delays of network
synDelayLoc=dict(
EX=[None, None],
IN=[None, None],
),
# no distribution of delays
synDelayScale=dict(
EX=[None, None],
IN=[None, None],
),
# For topology-like connectivity. Only exponential connectivity and
# circular masks are supported with fixed indegree given by k_yXL,
# using information on extent and edge wrap (periodic boundaries).
# At present, synapse delays are not distance-dependent. For speed,
# multapses are always allowed.
# Information is here duplicated for each postsynaptic population as in the
# network, but it could potentially be set per postsynaptic population
topology_connections=dict(
EX={
X: dict(
extent=BN.layerdict_EX['extent'],
edge_wrap=BN.layerdict_EX['edge_wrap'],
allow_autapses=BN.conn_dict_EX['allow_autapses'],
kernel=BN.conn_kernel_EX,
mask=BN.conn_dict_EX['mask'],
delays=BN.conn_delay_EX,
) for X in ['EX', 'IN']},
IN={
X: dict(
extent=BN.layerdict_IN['extent'],
edge_wrap=BN.layerdict_IN['edge_wrap'],
allow_autapses=BN.conn_dict_IN['allow_autapses'],
kernel=BN.conn_kernel_IN,
mask=BN.conn_dict_IN['mask'],
delays=BN.conn_delay_IN,
) for X in ['EX', 'IN']},
)
))
# putative mapping between population type and cell type specificity,
# but here all presynaptic senders are also postsynaptic targets
PS.update(dict(
mapping_Yy=list(zip(PS.X, PS.X))
))
#########################################################################
# MAIN simulation procedure #
#########################################################################
# tic toc
tic = time()
# ######## Perform network simulation #####################################
if PROPERRUN:
# set up the file destination, removing old results by default
setup_file_dest(PS, clearDestination=True)
if PROPERRUN:
# run network simulation
BN.run_model()
# wait for the network simulation to finish, resync MPI threads
COMM.Barrier()
# Create an object representation containing the spiking activity of the
# network simulation output that uses sqlite3. Again, kwargs are derived from
# the brunel network instance.
networkSim = CachedTopoNetwork(
simtime=BN.simtime,
dt=BN.dt,
spike_output_path=BN.spike_output_path,
label=BN.label,
ext='dat',
GIDs={'EX': [1, BN.NE], 'IN': [BN.NE + 1, BN.NI]},
label_positions=BN.label_positions,
cmap='bwr_r',
skiprows=3,
)
toc = time() - tic
print('NEST simulation and gdf file processing done in %.3f seconds' % toc)
##############################################################################
# Create predictor for extracellular potentials that utilize periodic
# boundary conditions in 2D, similar to network connectivity
##############################################################################
class PeriodicLFP(lfpykit.RecExtElectrode):
    '''
    Modified version of lfpykit.RecExtElectrode that incorporates periodic
    boundary conditions for electrostatic forward models in 2D.

    The transformation matrix is the sum of the parent-class matrices of
    (2 * order + 1) ** 2 laterally shifted copies of the cell, mirroring
    the periodic network connectivity along x and y.

    Parameters
    ----------
    side_length: float > 0
        periodicity along lateral x and y direction
    order: int >= 1
        number of periodic images summed on each side of the original
        domain along each lateral axis
    **kwargs:
        lfpykit.RecExtElectrode parameters
    '''
    def __init__(self, side_length=4000., order=1, **kwargs):
        """Initialize RecExtElectrode class"""
        super().__init__(**kwargs)
        # keep a handle on the unmodified parent implementation;
        # it is evaluated once per periodic image below
        self._get_transformation_matrix = super().get_transformation_matrix
        self.side_length = side_length
        self.order = order

    def get_transformation_matrix(self):
        '''
        Get linear response matrix

        Returns
        -------
        response_matrix: ndarray
            shape (n_contacts, n_seg) ndarray

        Raises
        ------
        AttributeError
            if `cell is None`
        '''
        if self.cell is None:
            raise AttributeError(
                '{}.cell is None'.format(self.__class__.__name__))

        M = np.zeros((self.x.size, self.cell.totnsegs))
        # superimpose the contribution of every shifted copy of the cell
        for i in range(-self.order, self.order + 1):
            for j in range(-self.order, self.order + 1):
                # remember the original geometry so it can be restored
                x = self.cell.x.copy()
                y = self.cell.y.copy()
                self.cell.x = self.cell.x + i * self.side_length
                self.cell.y = self.cell.y + j * self.side_length
                M += self._get_transformation_matrix()
                # restore geometry before handling the next image
                self.cell.x = x
                self.cell.y = y
        return M
# #### Set up LFPykit measurement probes for LFPs and CSDs
if PROPERRUN:
probes = []
probes.append(PeriodicLFP(cell=None, **PS.electrodeParams))
###############################################
# Set up populations #
###############################################
if PROPERRUN:
# iterate over each cell type, and create population object
for i, Y in enumerate(PS.X):
# create population:
pop = TopoPopulation(
cellParams=PS.cellParams[Y],
rand_rot_axis=PS.rand_rot_axis[Y],
simulationParams=PS.simulationParams,
populationParams=PS.populationParams[Y],
y=Y,
layerBoundaries=PS.layerBoundaries,
savelist=PS.savelist,
savefolder=PS.savefolder,
probes=probes,
dt_output=PS.dt_output,
POPULATIONSEED=SIMULATIONSEED + i,
X=PS.X,
networkSim=networkSim,
k_yXL=PS.k_yXL[Y],
synParams=PS.synParams[Y],
synDelayLoc=PS.synDelayLoc[Y],
synDelayScale=PS.synDelayScale[Y],
J_yX=PS.J_yX[Y],
# TopoPopulation kwargs
topology_connections=PS.topology_connections,
# time res
)
# run population simulation and collect the data
pop.run()
pop.collect_data()
# object no longer needed
del pop
####### Postprocess the simulation output ################################
# reset seed, but output should be deterministic from now on
np.random.seed(SIMULATIONSEED)
if PROPERRUN:
# do some postprocessing on the collected data, i.e., superposition
# of population LFPs, CSDs etc
postproc = PostProcess(y=PS.X,
dt_output=PS.dt_output,
savefolder=PS.savefolder,
mapping_Yy=PS.mapping_Yy,
savelist=PS.savelist,
probes=probes,
cells_subfolder=PS.cells_subfolder,
populations_subfolder=PS.populations_subfolder,
figures_subfolder=PS.figures_subfolder
)
# run through the procedure
postproc.run()
# create tar-archive with output for plotting, ssh-ing etc.
postproc.create_tar_archive()
COMM.Barrier()
# tic toc
print('Execution time: %.3f seconds' % (time() - tic))
##########################################################################
# Create set of plots from simulation output
##########################################################################
def network_activity_animation(PS, networkSim,
                               T=(0, 200), kernel=np.exp(-np.arange(10) / 2),
                               save_anim=True):
    '''Animate network spiking activity as scatter points whose size
    follows the kernel-smoothed per-neuron spike rate.

    Parameters
    ----------
    PS: ParameterSet
        hybrid-scheme parameters; must provide dt_output, X and savefolder
    networkSim: CachedTopoNetwork
        network output container with dbs, nodes, positions and colors
    T: tuple of float
        (start, stop) time of the animation in ms
    kernel: ndarray
        temporal smoothing kernel convolved with the spike histograms
    save_anim: bool
        if True, write the animation to <savefolder>/NetworkTopo.mp4
        (requires ffmpeg)
    '''
    fig, ax = plt.subplots(1, figsize=(9, 10))
    fig.subplots_adjust(left=0.12, right=0.97, bottom=0.15, top=0.975)

    ax.set_aspect('equal')

    # frame the plot by the lateral extent of the excitatory layer
    ax.set_xlim(
        (-BN.layerdict_EX['extent'][0] / 2,
         BN.layerdict_EX['extent'][0] / 2))
    ax.set_ylim(
        (-BN.layerdict_EX['extent'][1] / 2,
         BN.layerdict_EX['extent'][1] / 2))
    ax.set_xlabel('x (um)', labelpad=0)
    ax.set_ylabel('y (um)', labelpad=0)
    ax.set_title('t=%.3i ms' % 100)

    dt = PS.dt_output
    tbins = np.arange(T[0], T[1] + dt, dt)

    spikes = {}
    scat = {}
    for j, X in enumerate(PS.X):
        db = networkSim.dbs[X]
        gid = networkSim.nodes[X]
        # per-neuron spike times within T; entries are variable length,
        # hence dtype=object
        gid_t = np.asarray(db.select_neurons_interval(gid, T), dtype=object)
        spikes[X] = np.zeros(gid_t.shape[0],
                             dtype=[('pos', float, 2),
                                    ('size', float, tbins.size - 1)])
        # set position arrays
        spikes[X]['pos'] = networkSim.positions[X]
        # set size arrays: smoothed spike-count histogram per neuron
        for i, t in enumerate(gid_t):
            spikes[X]['size'][i, :] = np.convolve(
                np.histogram(t, bins=tbins)[0] * 200, kernel, 'same')

        # scatter plot of positions, will not be shown in animation
        scat[X] = ax.scatter(
            spikes[X]['pos'][:, 0], spikes[X]['pos'][:, 1],
            s=np.random.rand(spikes[X]['size'].shape[0]) * 100,
            facecolors=networkSim.colors[j], edgecolors='none', label=X)

    # set legend
    ax.legend(loc=(0.65, -0.2), ncol=3, fontsize=10, frameon=False)

    def update(frame_number):
        '''update function for animation'''
        ind = frame_number % (tbins.size - 1)
        for j, X in enumerate(PS.X):
            scat[X].set_sizes(spikes[X]['size'][:, ind])
        ax.set_title('t=%.3i ms' % tbins[ind])

    ani = FuncAnimation(fig, update, frames=tbins.size, interval=1)
    if save_anim:
        ani.save(os.path.join(PS.savefolder, 'NetworkTopo.mp4'),
                 fps=15, writer='ffmpeg',
                 extra_args=['-b:v', '5000k', '-r', '25', '-vcodec', 'mpeg4'],)
    # plt.show()
def lfp_activity_animation(PS, networkSim,
                           T=(0, 200), kernel=np.exp(-np.arange(10) / 2),
                           save_anim=True):
    '''Animate the compound LFP on the square electrode grid.

    Parameters
    ----------
    PS: ParameterSet
        hybrid-scheme parameters; must provide savefolder, dt_output and
        electrodeParams
    networkSim: CachedTopoNetwork
        network simulation container (unused here; kept for a signature
        consistent with the sibling animation helpers)
    T: tuple of float
        (start, stop) time of the animation in ms
    kernel: ndarray
        smoothing kernel (unused here; kept for signature consistency)
    save_anim: bool
        if True, write the animation to <savefolder>/LFPTopo.mp4
        (requires ffmpeg)
    '''
    fig, ax = plt.subplots(1, figsize=(9, 10))
    fig.subplots_adjust(left=0.12, right=0.97, bottom=0.15, top=0.975)
    cbax = fig.add_axes([0.4, 0.1, 0.2, 0.02])

    ax.set_aspect('equal')

    ax.set_xlim(
        (-BN.layerdict_EX['extent'][0] / 2,
         BN.layerdict_EX['extent'][0] / 2))
    ax.set_ylim(
        (-BN.layerdict_EX['extent'][1] / 2,
         BN.layerdict_EX['extent'][1] / 2))
    ax.set_xlabel('x (um)', labelpad=0)
    ax.set_ylabel('y (um)', labelpad=0)
    ax.set_title('t=%.3i ms' % 100)

    dt = PS.dt_output
    tbins = np.arange(T[0], T[1] + dt, dt)

    # electrode geometry
    ax.scatter(PS.electrodeParams['x'], PS.electrodeParams['y'],
               s=20, color='k')

    # LFP data; BUG FIX: open read-only and close the handle immediately
    # (the original left the file open for the process lifetime and
    # relied on h5py's historical default file mode)
    fname = os.path.join(PS.savefolder, 'PeriodicLFP_sum.h5')
    with h5py.File(fname, 'r') as f:
        data = f['data'][()]

    # subtract the temporal mean of each channel
    dataT = data.T - data.mean(axis=1)
    data = dataT.T

    # reshape to (nrows, ncols, ntimes) matching the square contact grid
    data = data.reshape(
        (int(np.sqrt(PS.electrodeParams['x'].size)), -1, data.shape[-1]))

    # draw image plot on axes, color-clipped at +-4 standard deviations
    im = ax.pcolormesh(np.r_[0:4001:400] - 2000,
                       np.r_[0:4001:400] - 2000,
                       data[:, :, 0],
                       vmin=-data.std() * 4, vmax=data.std() * 4,
                       zorder=-1, cmap='jet_r')
    cbar = plt.colorbar(im, cax=cbax, orientation='horizontal')
    cbar.set_label('LFP (mV)', labelpad=0)
    tclbls = cbar.ax.get_xticklabels()
    plt.setp(tclbls, rotation=90, fontsize=10)

    def update(frame_number):
        '''update function for animation'''
        ind = frame_number % (tbins.size - 1)
        im.set_array(data[:, :, ind].flatten())
        ax.set_title('t=%.3i ms' % tbins[ind])

    ani = FuncAnimation(fig, update, frames=tbins.size, interval=1)
    if save_anim:
        ani.save(os.path.join(PS.savefolder, 'LFPTopo.mp4'),
                 fps=15, writer='ffmpeg',
                 extra_args=['-b:v', '5000k', '-r', '25',
                             '-vcodec', 'mpeg4'])
    # plt.show()
def network_lfp_activity_animation(PS, networkSim, T=(0, 200),
                                   kernel=np.exp(-np.arange(10) / 2),
                                   save_anim=True):
    '''Animate per-population spiking (scatter marker size) on top of the
    spatial LFP image.

    Parameters
    ----------
    PS : object
        Parameter set; must expose ``X``, ``savefolder``, ``dt_output`` and
        ``electrodeParams`` (assumed contract -- confirm against caller).
    networkSim : object
        Simulation container providing ``dbs``, ``nodes``, ``positions``
        and ``colors`` per population.
    T : tuple of float
        (start, stop) of the animated time window in ms.
    kernel : ndarray
        Kernel used to smooth the per-cell spike histograms.  The default
        is evaluated once at import time; safe because it is never mutated.
    save_anim : bool
        If True, write the animation to ``hybridLFPyTopo.mp4``.
    '''
    fig, ax = plt.subplots(1, figsize=(9, 10))
    fig.subplots_adjust(left=0.12, right=0.97, bottom=0.15, top=0.975)
    cbax = fig.add_axes([0.4, 0.1, 0.2, 0.02])
    ax.set_aspect('equal')
    ax.set_xlim(
        (-BN.layerdict_EX['extent'][0] / 2,
         BN.layerdict_EX['extent'][0] / 2))
    ax.set_ylim(
        (-BN.layerdict_EX['extent'][1] / 2,
         BN.layerdict_EX['extent'][1] / 2))
    ax.set_xlabel('x (um)', labelpad=0)
    ax.set_ylabel('y (um)', labelpad=0)
    ax.set_title('t=%.3i ms' % 100)

    dt = PS.dt_output
    tbins = np.arange(T[0], T[1] + dt, dt)

    spikes = {}
    scat = {}
    for j, X in enumerate(PS.X):
        db = networkSim.dbs[X]
        gid = networkSim.nodes[X]
        gid_t = np.asarray(db.select_neurons_interval(gid, T), dtype=object)
        spikes[X] = np.zeros(gid_t.shape[0],
                             dtype=[('pos', float, 2),
                                    # dtype=[('pos', float,
                                    #         networkSim.positions[X].shape),
                                    ('size', float, tbins.size - 1)])
        # set position arrays
        spikes[X]['pos'] = networkSim.positions[X]
        # set size arrays: binned spike counts smoothed with `kernel`
        for i, t in enumerate(gid_t):
            spikes[X]['size'][i, :] = np.convolve(
                np.histogram(t, bins=tbins)[0] * 200, kernel, 'same')
        # scatter plot of positions, will not be shown in animation
        scat[X] = ax.scatter(
            spikes[X]['pos'][:, 0], spikes[X]['pos'][:, 1],
            s=np.random.rand(spikes[X]['size'].shape[0]) * 100,
            facecolors=networkSim.colors[j], edgecolors='none', label=X)

    # set legend
    ax.legend(loc=(0.65, -0.2), ncol=3, fontsize=10, frameon=False)

    # electrode geometry
    ax.scatter(PS.electrodeParams['x'], PS.electrodeParams['y'],
               s=20, color='k')

    # LFP data: open read-only and close the handle once loaded (the
    # original relied on h5py's removed implicit default mode and never
    # closed the file).
    fname = os.path.join(PS.savefolder, 'PeriodicLFP_sum.h5')
    with h5py.File(fname, 'r') as f:
        data = f['data'][()]

    # subtract the per-channel temporal mean
    dataT = data.T - data.mean(axis=1)
    data = dataT.T

    # reshape to (n_rows, n_cols, n_timesteps) matching the square grid
    data = data.reshape(
        (int(np.sqrt(PS.electrodeParams['x'].size)), -1, data.shape[-1]))

    # draw image plot on axes; color scale clipped at +/- 4 standard deviations
    im = ax.pcolormesh(np.r_[0:4001:400] - 2000,
                       np.r_[0:4001:400] - 2000,
                       data[:, :, 0],
                       vmin=-data.std() * 4, vmax=data.std() * 4,
                       zorder=-1, cmap='jet_r')
    cbar = plt.colorbar(im, cax=cbax, orientation='horizontal')
    cbar.set_label('LFP (mV)', labelpad=0)
    tclbls = cbar.ax.get_xticklabels()
    plt.setp(tclbls, rotation=90, fontsize=10)

    def update(frame_number):
        '''update function for animation'''
        ind = frame_number % (tbins.size - 1)
        for j, X in enumerate(PS.X):
            scat[X].set_sizes(spikes[X]['size'][:, ind])
        im.set_array(data[:, :, ind].flatten())
        ax.set_title('t=%.3i ms' % tbins[ind])

    ani = FuncAnimation(fig, update, frames=tbins.size, interval=1)
    if save_anim:
        ani.save(os.path.join(PS.savefolder, 'hybridLFPyTopo.mp4'),
                 fps=15, writer='ffmpeg',
                 extra_args=['-b:v', '5000k', '-r', '25', '-vcodec', 'mpeg4'])
    # plt.show()
# Only the master MPI process renders and saves the three animations;
# RANK is constant for the lifetime of the process, so a single guard is
# equivalent to the original three separate ones.
if RANK == 0:
    network_activity_animation(PS, networkSim, save_anim=True)
    lfp_activity_animation(PS, networkSim, save_anim=True)
    network_lfp_activity_animation(PS, networkSim, save_anim=True)
# plt.show()
| gpl-3.0 |
robbymeals/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # undo whitening so the components carry their original scale
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Note: the original signature was missing ``self``, which broke any
        ``super()`` delegation to this placeholder; fixed here.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------
        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
Fireblend/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)

from time import time

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter

from sklearn import manifold, datasets

# Next line to silence pyflakes. This import is needed.
Axes3D

n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2

fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (1000, n_neighbors), fontsize=14)

try:
    # compatibility matplotlib < 1.0: fall back to a 2-D projection of the
    # S-curve when the 3-D toolkit is unavailable.  Catch only Exception
    # (not BaseException) so KeyboardInterrupt etc. still propagate.
    ax = fig.add_subplot(251, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    ax.view_init(4, -72)
except Exception:
    # BUG FIX: the fallback repeated the identical 3-D call that just
    # failed; use a plain 2-D axes instead.
    ax = fig.add_subplot(251)
    plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

for i, method in enumerate(methods):
    t0 = time()
    Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                        eigen_solver='auto',
                                        method=method).fit_transform(X)
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))

    ax = fig.add_subplot(252 + i)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
                                n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
# BUG FIX: `add_subplot(250)` is invalid (subplot index must be >= 1 and the
# shorthand cannot express position 10 of a 2x5 grid); use the 3-arg form.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
| bsd-3-clause |
Unidata/MetPy | v0.6/_downloads/meteogram_metpy.py | 2 | 9460 | # Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_rh
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
from metpy.units import units
def calc_mslp(t, p, h):
    """Reduce station pressure to mean sea level pressure.

    Applies the barometric reduction formula with a fixed lapse rate of
    0.0065 K/m and exponent -5.257.

    Parameters: t -- temperature (degC), p -- station pressure (hPa),
    h -- station height (m).  Returns MSLP in hPa.
    """
    lapse_term = 0.0065 * h
    base = 1 - lapse_term / (t + lapse_term + 273.15)
    return p * base ** (-5.257)
# Make meteogram plot
class Meteogram(object):
    """ Plot a time series of meteorological data from a particular station as a
    meteogram with standard variables to visualize, including thermodynamic,
    kinematic, and pressure. The functions below control the plotting of each
    variable.

    TO DO: Make the subplot creation dynamic so the number of rows is not
    static as it is currently. """

    def __init__(self, fig, dates, probeid, time=None, axis=0):
        """
        Required input:
            fig: figure object
            dates: array of dates corresponding to the data
            probeid: ID of the station
        Optional Input:
            time: Time the data is to be plotted
            axis: number that controls the new axis to be plotted (FOR FUTURE)
        """
        if not time:
            time = dt.datetime.utcnow()
        self.start = dates[0]
        self.fig = fig
        self.end = dates[-1]
        self.axis_num = 0
        self.dates = mpl.dates.date2num(dates)
        self.time = time.strftime('%Y-%m-%d %H:%M UTC')
        self.title = 'Latest Ob Time: {0}\nProbe ID: {1}'.format(self.time, probeid)

    def plot_winds(self, ws, wd, wsmax, plot_range=None):
        """
        Required input:
            ws: Wind speeds (knots)
            wd: Wind direction (degrees)
            wsmax: Wind gust (knots)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT WIND SPEED AND WIND DIRECTION
        # BUG FIX: use the figure handed to __init__ (self.fig) instead of
        # relying on a module-level global `fig`.
        self.ax1 = self.fig.add_subplot(4, 1, 1)
        ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
        plt.fill_between(self.dates, ws, 0)
        self.ax1.set_xlim(self.start, self.end)
        if not plot_range:
            plot_range = [0, 20, 1]
        plt.ylabel('Wind Speed (knots)', multialignment='center')
        self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        ln2 = self.ax1.plot(self.dates,
                            wsmax,
                            '.r',
                            label='3-sec Wind Speed Max')

        plt.setp(self.ax1.get_xticklabels(), visible=True)

        # wind direction shares the x axis on a twin y axis (0-360 deg)
        ax7 = self.ax1.twinx()
        ln3 = ax7.plot(self.dates,
                       wd,
                       '.k',
                       linewidth=0.5,
                       label='Wind Direction')
        plt.ylabel('Wind\nDirection\n(degrees)', multialignment='center')
        plt.ylim(0, 360)
        plt.yticks(np.arange(45, 405, 90), ['NE', 'SE', 'SW', 'NW'])
        lns = ln1 + ln2 + ln3
        labs = [line.get_label() for line in lns]
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        ax7.legend(lns, labs, loc='upper center',
                   bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})

    def plot_thermo(self, t, td, plot_range=None):
        """
        Required input:
            t: Temperature (deg F)
            td: Dewpoint (deg F)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT TEMPERATURE AND DEWPOINT
        if not plot_range:
            plot_range = [10, 90, 2]
        # BUG FIX: self.fig rather than the module-level global `fig`.
        self.ax2 = self.fig.add_subplot(4, 1, 2, sharex=self.ax1)
        ln4 = self.ax2.plot(self.dates,
                            t,
                            'r-',
                            label='Temperature')
        plt.fill_between(self.dates,
                         t,
                         td,
                         color='r')

        plt.setp(self.ax2.get_xticklabels(), visible=True)
        plt.ylabel('Temperature\n(F)', multialignment='center')
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])

        ln5 = self.ax2.plot(self.dates,
                            td,
                            'g-',
                            label='Dewpoint')
        plt.fill_between(self.dates,
                         td,
                         plt.ylim()[0],
                         color='g')

        ax_twin = self.ax2.twinx()
        # ax_twin.set_ylim(20,90,2)
        ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        lns = ln4 + ln5
        labs = [line.get_label() for line in lns]
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))

        self.ax2.legend(lns, labs, loc='upper center',
                        bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})

    def plot_rh(self, rh, plot_range=None):
        """
        Required input:
            rh: Relative humidity (%)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT RELATIVE HUMIDITY
        if not plot_range:
            plot_range = [0, 100, 4]
        # BUG FIX: self.fig rather than the module-level global `fig`.
        self.ax3 = self.fig.add_subplot(4, 1, 3, sharex=self.ax1)
        self.ax3.plot(self.dates,
                      rh,
                      'g-',
                      label='Relative Humidity')
        self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
        plt.setp(self.ax3.get_xticklabels(), visible=True)
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])

        plt.fill_between(self.dates, rh, plt.ylim()[0], color='g')
        plt.ylabel('Relative Humidity\n(%)', multialignment='center')
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        axtwin = self.ax3.twinx()
        axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])

    def plot_pressure(self, p, plot_range=None):
        """
        Required input:
            p: Mean Sea Level Pressure (hPa)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT PRESSURE
        if not plot_range:
            plot_range = [970, 1030, 2]
        # BUG FIX: self.fig rather than the module-level global `fig`.
        self.ax4 = self.fig.add_subplot(4, 1, 4, sharex=self.ax1)
        self.ax4.plot(self.dates,
                      p,
                      'm',
                      label='Mean Sea Level Pressure')
        plt.ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
        plt.ylim(plot_range[0], plot_range[1], plot_range[2])

        axtwin = self.ax4.twinx()
        axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        plt.fill_between(self.dates, p, plt.ylim()[0], color='m')

        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        plt.setp(self.ax4.get_xticklabels(), visible=True)

    # OTHER OPTIONAL AXES TO PLOT
    # plot_irradiance
    # plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
# set the starttime and endtime for plotting, 24 hour range
# (fixed to the window covered by the bundled test data file)
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)

# Height of the station to calculate MSLP
# (presumably meters above sea level -- confirm against station metadata)
hgt_example = 292.
def parse_date(date):
    """Decode a raw CSV byte string such as ``b'2016-03-31 22:00:00'``
    into a ``datetime`` object."""
    text = date.decode('ascii')
    return dt.datetime.strptime(text, '%Y-%m-%d %H:%M:%S')
# Load the bundled time series; column 0 (raw index) is skipped and the
# DATE column is converted to datetime objects via parse_date.
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
                         usecols=list(range(1, 8)),
                         converters={'DATE': parse_date}, delimiter=',')

# Temporary variables for ease
temp = testdata['T']
pres = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']

# ID For Plotting on Meteogram
probe_id = '0102A'

# Attach physical units and convert to the display units the Meteogram
# methods expect (knots, degF, hPa); MSLP is derived from station
# pressure/temperature via calc_mslp at the example station height.
data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')),
        'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')),
        'wind_direction': np.array(wd) * units('degrees'),
        'dewpoint': dewpoint_rh((np.array(temp) * units('degC')).to(units('K')),
                                np.array(rh) / 100.).to(units('degF')),
        'air_temperature': (np.array(temp) * units('degC')).to(units('degF')),
        'mean_slp': calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa'),
        'relative_humidity': np.array(rh), 'times': np.array(date)}

# Build the figure and draw each meteogram panel.
fig = plt.figure(figsize=(20, 16))
add_metpy_logo(fig, 250, 180)
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/feature_stacker.py | 50 | 1910 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way to high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features where good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
metinsay/docluster | docluster/models/word_embedding/word2vec.py | 1 | 14211 | import collections
import math
import multiprocessing
import os
import random
import threading
from copy import deepcopy
import pandas as pd
import numpy as np
import tensorflow as tf
from docluster.core import Model
from docluster.core.document_embedding import TfIdf
from docluster.core.preprocessing import Preprocessor, TokenFilter
from docluster.utils.constants import DistanceMetric, FileType
from docluster.utils.data_fetcher import FileFetcher
from docluster.utils.data_saver import FileSaver
from scipy.special import expit
from .word_embeddings import WordEmbeddings
class Word2Vec(Model):
def __init__(self, preprocessor=None, n_skips=16, n_negative_samples=100, n_words=10000, embedding_size=100, batch_size=32, window_size=10, learning_rate=0.025, n_epochs=1, n_workers=4, do_plot=False):
"""
A Skip-Gram model Word2Vec with multi-thread training capability.
Paramaters:
-----------
preprocessor : Preprocessor
The preprocessor that will tokenize the documents.
The default one also filters punctuation, tokens with numeric
characters and one letter words. Furthermore, no stemming or
lemmatization is applied. All these can be adjusted
by passing a custom preprocessor.
n_skip : int
The number of skips.
n_negative_samples : int
The number of negative samples that are going to collected for each
batch.
n_words : int
The number of words that the vocabulary will have. The filtering is
based on the word frequency. Therefore, less frequent words will not
be included in the vocabulary.
embedding_size : int
The size of the embedding vectors. Usually the more makes the embeddings
more accurate, but this is not always the case. Increasing the size
dramatically affects trainning time.
batch_size : int
The batch size.
window_size : int
The window size where the words to the left and to the right of the words
will give context to the word.
learning_rate : int
The initial learning rate of the gradient decent.
n_epochs : int
The number of epoches the model is going to be trained. Increasing the number
dramatically affects trainning time.
n_workers : int
The number of workers that is going to train the model concurrently.
It is not recommended to use more than the number of core.
do_plot : bool
Attributes:
-----------
embeddings :
The embedding vectors that represents each word
"""
if preprocessor is None:
additional_filters = [lambda token: len(token) == 1]
token_filter = TokenFilter(filter_stop_words=False,
additional_filters=additional_filters)
preprocessor = Preprocessor(do_stem=False, do_lemmatize=False,
parse_html=False, token_filter=token_filter, lower=False)
self.preprocessor = preprocessor
self.n_skips = n_skips
self.n_negative_samples = n_negative_samples
self.embedding_size = embedding_size
self.batch_size = batch_size
self.window_size = window_size
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.n_words = n_words
self.n_workers = n_workers
self._total_loss = 0
self._dist_metric = DistanceMetric.cosine
self.embeddings = WordEmbeddings(size=embedding_size, n_words=n_words)
self.locks = np.ones(n_words)
self.syn1 = np.zeros((n_words, embedding_size))
self.syn1neg = np.zeros((n_words, embedding_size))
def fit(self, documents):
"""
Train the Word2Vec model with the documents.
Paramaters:
-----------
documents : list(str)
the documents that the Word2Vec model is going to learn the embeddings from.
"""
n_words_trained = 0
tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(
documents)
n_tokens = len(tokens)
n_vocab = len(self.vocab)
words_per_epoch = n_vocab / self.n_epochs
self._cum_dist = self._build_cum_dist()
def _build_dataset(self, documents):
"""Preprocesses the documents and creates the dataset for fitting."""
# Get the term frequencies without idf
tfidf = TfIdf(do_idf=False, preprocessor=self.preprocessor, n_words=self.n_words)
tfidf.fit(documents)
# Flatten the document tokens to create one long list
tokens = list(np.hstack(np.array(tfidf.document_tokens)))
# Create the vocab list with 'UNK' for vocab that couldn't make the vocab list
vocab = tfidf.vocab
vocab_set = set(vocab)
diction = {token: index for index, token in enumerate(vocab)}
reverse_diction = dict(zip(diction.values(), diction.keys()))
# Turn the long token list into a index references to the diction
data = list(map(lambda token: diction[token]
if token in vocab_set else 0, tokens))
# Get the frequencies of tokens and add the frequency of 'UNK' at the beginning
# frequencies = np.insert(tfidf.total_term_freq, 0, data.count(0))[:self.n_words]
frequencies = tfidf.total_term_freq[:self.n_words]
return tokens, vocab, data, frequencies, diction, reverse_diction
def _build_cum_dist(self, distortion=0.75, domain=2**31 - 1):
freq_total = np.sum(self._frequencies ** distortion)
cum_dist = np.cumsum(self._frequencies) * domain / freq_total
return cum_dist
def _train(self, data, optimizer, loss):
"""Train the model."""
start_index = 0
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
self._sess = sess
self._sess.run(init_op)
for epoch in range(self.n_epochs):
self._train_one_epoch(data, optimizer, loss)
print("Epoch:", (epoch + 1))
self.embeddings = self._embeddings.eval()
print("\nTraining complete!")
def _train_one_example(self, example, label, alpha):
predict_word = model.wv.vocab[word] # target word (NN output)
# input word (NN input/projection layer)
example_index = self._diction[example]
embedding = self.embeddings.vectors[example_index]
lock = self.locks[example_index]
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
# 2d matrix, codelen x layer1_size
l2a = deepcopy(self.syn1[predict_word.point])
prod_term = np.dot(embedding, l2a.T)
fa = expit(prod_term) # propagate hidden -> output
# vector of error gradients multiplied by the learning rate
ga = (1 - predict_word.code - fa) * alpha
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
sgn = (-1.0)**predict_word.code # `ch` function, 0 -> 1, 1 -> -1
lprob = -log(expit(-sgn * prod_term))
self._total_loss += sum(lprob)
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(
model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
prod_term = dot(l1, l2b.T)
fb = expit(prod_term) # propagate hidden -> output
# vector of error gradients multiplied by the learning rate
gb = (model.neg_labels - fb) * alpha
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
# loss component corresponding to negative sampling
if compute_loss:
# for the sampled words
self._total_loss -= sum(log(expit(-1 * prod_term[1:])))
# for the output word
self._total_loss -= log(expit(prod_term[0]))
if learn_vectors:
# learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
embedding += neu1e * lock_factor
def _train_one_epoch(self, data, optimizer, loss):
"""Train one epoch with workers."""
# Each worker generates a batch and trains it until posion pill
def worker_duty():
"""The duty of a single worker."""
while True:
batch = queue.get()
if batch is None:
break
examples, labels, alphas = batch
for example, label, alpha in batch:
self._train_one_example(example, label, alpha)
def generate_batch():
"""Create a batch for a training step in Word2Vec."""
# Initialize variables
example = np.zeros(self.batch_size)
labels = np.zeros((self.batch_size, 1))
alphas = np.zeros(self.batch_size)
n_items = 0
index = 0
while index < len(data):
reduced_window = random.randint(0, self.window_size)
if data[index] is not None:
left = max(0, index - self.window_size + reduced_window)
right = min((index + self.window_size + 1 -
reduced_window), len(data) - 1)
for pos2 in range(left, right, 1):
if n_items == self.batch_size:
queue.put((example, labels, index))
example = np.zeros(self.batch_size)
labels = np.zeros((self.batch_size, 1))
n_items = 0
if pos2 != index and data[pos2] is not None:
example[n_items] = data[pos2]
labels[n_items] = data[index]
alpha = self.learning_rate - \
(self.learning_rate - 0.001) * (index / self.n_words)
alphas[n_items] = max(0.001, alpha)
n_items += 1
index += 1
# Poison pills
for _ in range(n_workers):
queue.put(None)
# Create a threadsafe queue to store the batch indexes
queue = multiprocessing.Queue(maxsize=2 * self.n_workers)
# Create and run the threads
workers = [threading.Thread(target=generate_batch)]
workers.extend([threading.Thread(target=worker_duty)
for _ in range(self.n_workers - 1)])
for worker in workers:
worker.start()
for thread in workers:
thread.join()
def most_similar_words(self, word, n_words=5, include_similarity=False):
"""
Get the most similar words to a word.
Paramaters:
-----------
word : list(str)
The word that is the point of intrest.
n_words : int
The number of words that is going to be returned.
include_similarity : bool
If to include the similarity score as part of a tuple next to the words.
Return:
-------
similar_words : list(str) or list(tuple(str, float))
The words that are most similar to the word according to the trained
embeddings.
"""
if word in self.vocab:
token_id = self.diction[word]
tiled_embedding = np.tile(self.embeddings[token_id], (self.n_words, 1))
embedding_similarities = self._dist_metric(tiled_embedding, self.embeddings)
most_similar_token_ids = (-embedding_similarities).argsort()
return list(map(lambda token_id: self.reverse_diction[token_id], most_similar_token_ids))
else:
print('not in vocab')
def save_model(self, model_name, file_type=FileType.csv, safe=True, directory_path=None):
"""
Save the fitted model.
Paramaters:
-----------
model_name : str
The model name (also the file name) of the model is going to be saved under.
file_type : FileType
The file type that the model is going to be saved as.
Return:
-------
saved : bool
If the model is saved successfully or not.
"""
if self.embeddings is None:
return False
data = pd.DataFrame(self.embeddings.T)
data.columns = self.vocab
if directory_path:
file_saver = FileSaver(directory_path=directory_path)
else:
file_saver = FileSaver()
return file_saver.save(data, model_name, file_type=file_type, safe=safe)
    def load_model(self, model_name, file_type=FileType.csv, directory_path=None):
        """Load previously saved embeddings and rebuild the vocabulary maps.

        Reads the DataFrame written by ``save_model`` (tokens as columns,
        embedding dimensions as rows) and restores ``embeddings``, ``vocab``,
        ``diction`` and ``reverse_diction``.
        """
        if directory_path:
            file_fetcher = FileFetcher(directory_path=directory_path)
        else:
            file_fetcher = FileFetcher()
        # NOTE(review): incrementing n_words by one on every load looks
        # suspicious -- after loading, n_words should presumably equal
        # len(self.vocab). TODO confirm against the training code.
        self.n_words += 1
        data = file_fetcher.load(model_name, file_type)
        # NOTE: DataFrame.as_matrix() is deprecated in newer pandas
        # (use .values / .to_numpy() instead).
        self.embeddings = data.as_matrix().T
        self.vocab = data.columns.tolist()
        self.diction = {token: index for index, token in enumerate(self.vocab)}
        self.reverse_diction = dict(zip(self.diction.values(), self.diction.keys()))
| mit |
ttchin/FaceDetected | FaceTrain.py | 1 | 8343 | #! encoding: UTF-8
#%%
from __future__ import print_function
import random
import numpy as np
from sklearn.cross_validation import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from keras.utils import plot_model
from FaceInput import extract_data, resize_with_pad, IMAGE_SIZE
import os
class Dataset(object):
    """Holds the train/validation/test arrays for the face classifier."""

    def __init__(self):
        self.X_train = None
        self.X_valid = None
        self.X_test = None
        self.Y_train = None
        self.Y_valid = None
        self.Y_test = None

    def read(self, nb_classes, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3):
        """Load the face images, build a 70/15/15 split and one-hot encode labels.

        Parameters:
            nb_classes: number of target classes (one per person/object).
            img_rows: image height in pixels.
            img_cols: image width in pixels.
            img_channels: number of color channels (RGB assumed).
        """
        images, labels = extract_data('./train/')
        labels = np.reshape(labels, [-1])
        # BUG FIX: previously both train_test_split calls drew from the full
        # data set, so the validation and test images overlapped the training
        # images (data leakage). Split off a 30% hold-out first, then halve
        # it into validation and test sets.
        X_train, X_rest, y_train, y_rest = train_test_split(
            images, labels, test_size=0.3, random_state=random.randint(0, 100))
        X_valid, X_test, y_valid, y_test = train_test_split(
            X_rest, y_rest, test_size=0.5, random_state=random.randint(0, 100))
        # Channels-last layout expected by the TensorFlow backend.
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_valid = X_valid.reshape(X_valid.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_valid.shape[0], 'valid samples')
        print(X_test.shape[0], 'test samples')
        # Convert class vectors to binary class matrices (one-hot).
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_valid = np_utils.to_categorical(y_valid, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        # Normalize pixel intensities from [0, 255] to [0, 1].
        X_train = X_train.astype('float32') / 255
        X_valid = X_valid.astype('float32') / 255
        X_test = X_test.astype('float32') / 255
        self.X_train = X_train
        self.X_valid = X_valid
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_valid = Y_valid
        self.Y_test = Y_test
class Model(object):
    """Wrapper around a small Keras CNN (two conv/pool stages + two dense
    layers) for face classification, with save/load/predict helpers."""

    # Default location the trained model is (de)serialized to.
    FILE_PATH = './model/model.h5'

    def __init__(self):
        self.model = None

    def build_model(self, nb_classes):
        """Assemble the CNN for *nb_classes* output classes.

        NOTE(review): dim_ordering / Convolution2D / predict_proba are
        Keras 1.x-era APIs -- this code is tied to an old Keras version.
        """
        self.model = Sequential()
        self.model.add(
            Convolution2D(
                filters=32,
                kernel_size=(5, 5),
                padding='same',
                dim_ordering='tf',
                input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                name="1"
            )
        )
        self.model.add(Activation('relu',name="2"))
        self.model.add(
            MaxPooling2D(
                pool_size=(2, 2),
                strides=(2, 2),
                padding='same',
                name="3"
            )
        )
        self.model.add(Convolution2D(filters=64, kernel_size=(5, 5), padding='same', name='4'))
        self.model.add(Activation('relu',name='5'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',name='6'))
        self.model.add(Flatten(name='7'))
        self.model.add(Dense(512,name="8"))
        self.model.add(Activation('relu',name='9'))
        self.model.add(Dense(nb_classes,name='10'))
        self.model.add(Activation('softmax',name='11'))
        self.model.summary()

    def train(self, dataset, batch_size=20, nb_epoch=1, data_augmentation=True):
        """Compile and fit the network on *dataset*, optionally with
        real-time image augmentation."""
        self.dataset = dataset
        self.model.compile(
            optimizer='adam',  # Many optimizers are available (e.g. RMSprop, Adagrad); feel free to experiment -- in my experience the difference is small.
            loss='categorical_crossentropy',  # squared_hinge could also be tried as the loss to see which works better.
            metrics=['accuracy'])
        if not data_augmentation:
            print('Not using data augmentation.')
            self.model.fit(dataset.X_train, dataset.Y_train,
                           batch_size=batch_size,
                           nb_epoch=nb_epoch,
                           validation_data=(dataset.X_valid, dataset.Y_valid),
                           shuffle=True)
            # epochs and batch_size are tunable parameters: epochs is the number of training passes, batch_size the number of samples per update.
            #self.model.fit(self.dataset.X_train,self.dataset.Y_train,nb_epoch,batch_size)
        else:
            print('Using real-time data augmentation.')
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,             # set input mean to 0 over the dataset
                samplewise_center=False,              # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,   # divide each input by its std
                zca_whitening=False,                  # apply ZCA whitening
                rotation_range=20,                    # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.2,                # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.2,               # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,                 # randomly flip images
                vertical_flip=False)                  # randomly flip images
            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(dataset.X_train)
            # fit the model on the batches generated by datagen.flow()
            self.model.fit_generator(datagen.flow(dataset.X_train, dataset.Y_train,
                                                  batch_size=batch_size),
                                     samples_per_epoch=dataset.X_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     validation_data=(dataset.X_valid, dataset.Y_valid))

    def save(self, file_path=FILE_PATH):
        """Serialize the Keras model (architecture + weights) to *file_path*."""
        print('Model Saved.')
        self.model.save(file_path)

    def load(self, file_path=FILE_PATH):
        """Restore a model previously written by save()."""
        print('Model Loaded.')
        self.model = load_model(file_path)

    def predict(self, image):
        """Classify one image; returns (class index, probability)."""
        if K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
            # Pad/resize to the network's input size and add a batch axis.
            image = resize_with_pad(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        # Same [0, 1] normalization as used during training.
        image = image.astype('float32')
        image /= 255
        result = self.model.predict_proba(image)
        print(result)
        max_index = np.argmax(result)
        return max_index,result[0][max_index]

    def evaluate(self, dataset):
        """Print the model's accuracy on the held-out test split."""
        score = self.model.evaluate(dataset.X_test, dataset.Y_test, verbose=0)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))

    def getTrainCfg(self):
        """Read ./model/train.cfg and return enumerate(lines) as (id, name) pairs."""
        array=[]
        with open("./model/train.cfg", 'r') as fread:
            lines = fread.readlines()
            for name in lines:
                array.append(name.strip("\n"))
        print(array)
        return enumerate(array)

    def generateTrainCfg(self):
        """Write one line per class sub-directory of ./train into ./model/train.cfg."""
        with open("./model/train.cfg", "w+") as fwrite:
            for dir in os.listdir("train"):
                if not dir.startswith("."):  # avoid hidden folder like .DS_Store
                    fwrite.write("{}\n".format(dir))
if __name__ == '__main__':
    # One class per entry of ./train (hidden entries counted too, as before).
    count = len(os.listdir("train"))
    print("%d type objects need to be trained" % count)

    dataset = Dataset()
    dataset.read(count)

    # Train a fresh model and persist it to disk.
    trainer = Model()
    trainer.build_model(count)
    trainer.train(dataset, data_augmentation=False)
    trainer.save()

    # Reload from disk to verify the serialized weights, then evaluate.
    model = Model()
    model.load()
    model.evaluate(dataset)
    model.generateTrainCfg()
    print("Model training completed.")
| mit |
alpha-beta-soup/errorgeopy | errorgeopy/utils.py | 1 | 14060 | """Utility functions for ErrorGeoPy. Inteded to be private functions, their
call signatures are not considered strictly static.
.. moduleauthor Richard Law <richard.m.law@gmail.com>
"""
import numpy as np
from collections import namedtuple
from functools import partial, wraps
from itertools import compress
import inspect
import geopy
from geopy.point import Point as GeopyPoint
from shapely.geometry import Point, MultiPoint, Polygon
from scipy.spatial import Delaunay
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import MeanShift, AffinityPropagation, DBSCAN, estimate_bandwidth
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn import metrics
from sklearn.metrics.pairwise import euclidean_distances
import pyproj
from errorgeopy.smallestenclosingcircle import make_circle
def check_location_type(func):
    """Decorator checking that the *second* positional argument of the wrapped
    function is a sequence of geopy.Location objects.

    Raises:
        ValueError: if any member of ``args[1]`` is not a ``geopy.Location``.
    """

    @wraps(func)
    def inner(*args, **kwargs):
        if not all(isinstance(loc, geopy.Location) for loc in args[1]):
            # BUG FIX: previously raised a bare ValueError with no message,
            # which made failures hard to diagnose.
            raise ValueError(
                'expected a sequence of geopy.Location objects')
        return func(*args, **kwargs)

    return inner
def check_addresses_exist(func):
    """Decorator that short-circuits to ``None`` unless the first argument of
    the wrapped function carries a truthy ``addresses`` property.
    """

    @wraps(func)
    def inner(*args, **kwargs):
        subject = args[0]
        if subject.addresses:
            return func(*args, **kwargs)
        return None

    return inner
def geopy_point_to_shapely_point(point):
    """Convert a geopy.point.Point into a shapely.geometry.Point.

    Args:
        point (geopy.point.Point)

    Raises:
        TypeError: if ``point`` is not a geopy point.
    """
    if isinstance(point, GeopyPoint):
        return Point(point.longitude, point.latitude, point.altitude)
    raise TypeError
def array_geopy_points_to_shapely_points(points):
    """Convert a sequence of geopy.point.Point objects into a list of
    shapely.geometry.Point objects.

    Args:
        points (sequence of geopy.point.Point objects)
    """
    return list(map(geopy_point_to_shapely_point, points))
def array_geopy_points_to_xyz_tuples(points):
    """Convert a sequence of geopy.point.Point objects into a list of
    (x, y, z) coordinate tuples.

    Args:
        points (sequence of geopy.point.Point objects)
    """
    shapely_points = array_geopy_points_to_shapely_points(points)
    return [shapely_point.coords[0] for shapely_point in shapely_points]
def sq_norm(v):
    """Return the squared Euclidean (L2) norm of *v*."""
    length = np.linalg.norm(v)
    return length ** 2
def circumcircle(points, simplex):
    """Computes the circumcentre and circumradius of a triangle:
    https://en.wikipedia.org/wiki/Circumscribed_circle#Circumcircle_equations

    Args:
        points: sequence of 2D coordinates, indexable by the simplex ids.
        simplex: three indices into ``points`` forming the triangle.

    Returns:
        (centre, radius) of the triangle's circumscribed circle.
    """
    # The three triangle vertices.
    A = [points[simplex[k]] for k in range(3)]
    # 4x4 matrix from the circumcircle determinant equation; row k+1 is
    # [|A_k|^2, x_k, y_k, 1].
    M = np.asarray(
        [[1.0] * 4] +
        [[sq_norm(A[k]), A[k][0], A[k][1], 1.0] for k in range(3)],
        dtype=np.float32)
    # Cofactor expansion: S holds the circumcentre numerators.
    S = np.array([
        0.5 * np.linalg.det(M[1:, [0, 2, 3]]),
        -0.5 * np.linalg.det(M[1:, [0, 1, 3]])
    ])
    a = np.linalg.det(M[1:, 1:])
    b = np.linalg.det(M[1:, [0, 1, 2]])
    centre, radius = S / a, np.sqrt(b / a + sq_norm(S) / a**2)
    return centre, radius
def get_alpha_complex(alpha, points, simplexes):
    """Obtain the alpha shape.

    Args:
        alpha (float): the parameter for the alpha shape
        points: data points
        simplexes: the list of indices that define 2-simplexes in the Delaunay
            triangulation
    """
    def small_enough(simplex):
        # Keep only triangles whose circumradius is below alpha.
        return circumcircle(points, simplex)[1] < alpha

    return filter(small_enough, simplexes)
def concave_hull(points, alpha, delunay_args=None):
    """Computes the concave hull (alpha-shape) of a set of points.

    Args:
        points: sequence of (x, y) coordinates.
        alpha (float): alpha-shape parameter; triangles whose circumradius
            is not below this value are discarded.
        delunay_args (dict): optional keyword arguments forwarded to
            scipy.spatial.Delaunay.

    Returns:
        shapely.geometry.Polygon: union of the retained triangles.
    """
    delunay_args = delunay_args or {
        'furthest_site': False,
        'incremental': False,
        'qhull_options': None
    }
    # BUG FIX: delunay_args was previously built but never forwarded to
    # Delaunay, so callers could not influence the triangulation.
    triangulation = Delaunay(np.array(points), **delunay_args)
    alpha_complex = get_alpha_complex(alpha, points, triangulation.simplices)
    X, Y = [], []
    # BUG FIX: iterate the filtered alpha complex, not every Delaunay
    # simplex -- previously the alpha parameter had no effect and the result
    # degenerated to the union of the full triangulation.
    for s in alpha_complex:
        X.append([points[s[k]][0] for k in [0, 1, 2, 0]])
        Y.append([points[s[k]][1] for k in [0, 1, 2, 0]])
    poly = Polygon(list(zip(X[0], Y[0])))
    for i in range(1, len(X)):
        poly = poly.union(Polygon(list(zip(X[i], Y[i]))))
    return poly
def point_nearest_point(points, point):
    """Returns the member of <points> that is nearest to <point>.

    Ties are broken in favor of the earliest member of <points>.
    """
    return min(points, key=point.distance)
def cross(o, a, b):
    """2D cross product of vectors OA and OB, i.e. the z-component of their
    3D cross product.

    Returns:
        A positive value if O->A->B makes a counter-clockwise turn, a
        negative value for a clockwise turn, and zero when the points are
        collinear.
    """
    oa_x, oa_y = a[0] - o[0], a[1] - o[1]
    ob_x, ob_y = b[0] - o[0], b[1] - o[1]
    return oa_x * ob_y - oa_y * ob_x
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain#Python
def convex_hull(points):
    """Computes the convex hull of a set of 2D points.

    Takes an iterable sequence of hashable (x, y, ...) tuples representing the
    points. Only the (x, y) pairs are used, so output is in two-dimensions.
    Outputs a shapely.geometry.Polygon representing the convex hull, in
    counter-clockwise order, starting from the vertex with the lexicographically
    smallest coordinates. Implements Andrew's monotone chain algorithm.
    O(n log n) complexity.
    """
    # Convert, sort the points lexicographically, and remove duplicates
    points = sorted(set(points))

    # NOTE(review): with 0 or 1 distinct points this returns the *list* of
    # points rather than a Polygon as documented -- callers must handle both.
    if len(points) <= 1:
        return points

    # Build lower hull
    lower = []
    for p in points:
        while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
            lower.pop()
        lower.append(p)

    # Build upper hull
    upper = []
    for p in reversed(points):
        while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
            upper.pop()
        upper.append(p)

    # Concatenation of the lower and upper hulls gives the convex hull.
    # Last point of each list is omitted because it is repeated at the
    # beginning of the other list.
    # Input to Polygon is a list of vertices in counter-clockwise order,
    # starting at the point with the lexicographically smallest coordinates
    return Polygon(lower[:-1] + upper[:-1])
def minimum_bounding_circle(points):
    """Returns the minimum bounding circle of a set of points as a
    shapely.geometry.Polygon (64-sided polygon approximating a circle),
    or None when no circle can be computed.
    """
    # TODO using cartesian coordinates, not geographic
    mbc = make_circle(points)
    if not mbc:
        return None
    centre_x, centre_y, radius = mbc
    return Point(centre_x, centre_y).buffer(radius)
def cluster_named_tuple():
    """Defines a NamedTuple representing a single cluster.

    Returns:
        Named tuple with the following properties:
            label (int): the id of the cluster
            centroid (Point): point representing the cluster centre
            location (errorgeopy.Location): one Location holding every
                member of this cluster
    """
    return namedtuple('Cluster', ['label', 'centroid', 'location'])
def mean_shift(location, location_callback, bandwidth=None):
    """Returns one or more clusters of a set of points, using a mean shift
    algorithm.
    The result is sorted with the first value being the largest cluster.

    Kwargs:
        bandwidth (float): If bandwidth is None, a value is detected
        automatically from the input using estimate_bandwidth.

    Returns:
        A list of NamedTuples (see get_cluster_named_tuple for a definition
        of the tuple), or None when the input is empty or non-finite.
    """
    pts = location._tuple_points()
    if not pts:
        return None
    X = np.array(pts).reshape((len(pts), len(pts[0])))
    # Bail out rather than feed NaN/inf coordinates to scikit-learn.
    if np.any(np.isnan(X)) or not np.all(np.isfinite(X)):
        return None
    # Replace missing values and work in float32 for scikit-learn.
    X = Imputer().fit_transform(X)
    X = X.astype(np.float32)
    if not bandwidth:
        bandwidth = estimate_bandwidth(X, quantile=0.3)
    ms = MeanShift(bandwidth=bandwidth or None, bin_seeding=False).fit(X)
    clusters = []
    for cluster_id, cluster_centre in enumerate(ms.cluster_centers_):
        # Collect the input locations labelled with this cluster id.
        locations = []
        for j, label in enumerate(ms.labels_):
            if not label == cluster_id:
                continue
            locations.append(location.locations[j])
        if not locations:
            continue
        clusters.append(cluster_named_tuple()(label=cluster_id,
                                              centroid=Point(cluster_centre),
                                              location=location_callback(
                                                  locations)))
    return clusters
def affinity_propagation(location, location_callback):
    """Returns one or more clusters of a set of points, using an affinity
    propagation algorithm.
    The result is sorted with the first value being the largest cluster.

    Returns:
        A list of NamedTuples (see get_cluster_named_tuple for a definition
        of the tuple), or None when the input is empty or non-finite.
    """
    pts = location._tuple_points()
    if not pts:
        return None
    X = np.array(pts).reshape((len(pts), len(pts[0])))
    # Bail out rather than feed NaN/inf coordinates to scikit-learn.
    if np.any(np.isnan(X)) or not np.all(np.isfinite(X)):
        return None
    # Replace missing values and work in float32 for scikit-learn.
    X = Imputer().fit_transform(X)
    X = X.astype(np.float32)
    # scikit-learn defaults, spelled out for clarity.
    afkwargs = {
        'damping': 0.5,
        'convergence_iter': 15,
        'max_iter': 200,
        'copy': True,
        'preference': None,
        'affinity': 'euclidean',
        'verbose': False
    }
    af = AffinityPropagation(**afkwargs).fit(X)
    # (An unused read of af.cluster_centers_indices_ was removed here.)
    clusters = []
    for cluster_id, cluster_centre in enumerate(af.cluster_centers_):
        # Collect the input locations labelled with this cluster id.
        locations = []
        for j, label in enumerate(af.labels_):
            if not label == cluster_id:
                continue
            locations.append(location.locations[j])
        if not locations:
            continue
        clusters.append(cluster_named_tuple()(label=cluster_id,
                                              centroid=Point(cluster_centre),
                                              location=location_callback(
                                                  locations)))
    return clusters
def dbscan(location, location_callback, core_only=False, epsilon=1, **kwargs):
    """Returns one or more clusters of a set of points, using a DBSCAN
    algorithm.
    The result is sorted with the first value being the largest cluster.

    Returns:
        A list of NamedTuples (see get_cluster_named_tuple for a definition
        of the tuple), or None for empty/singleton/non-finite input.

    Notes:
        This implementation is not strictly correct (improved method explained
        here http://geoffboeing.com/2014/08/clustering-to-reduce-spatial-data-set-size/),
        but good enough for now.
        The centre of a cluster computed with DBSCAN is not meaningful (because
        they are irregularly-shaped), so centroids are determined as a point
        in the cluster that is nearest the geometric centre of the cluster,
        rather than merely the geometric centre.
        NOTE(review): **kwargs is accepted but never used.
    """
    # TODO pretty sure the output of this is not sorted...
    # TODO I don't know why, but in tests this raises
    # errorgeopy/.tox/py35/lib/python3.5/site-packages/sklearn/utils/validation.py:386: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and willraise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.
    # Even though all methods using 2D arrays
    # Only (x, y) is used; altitude is dropped.
    pts = [p[0:2] for p in location._tuple_points()]
    if not pts or len(pts) == 1:
        return None
    pts = np.array(pts)
    # print(pts.ndim)
    # Pairwise city-block distance matrix, standardized before clustering.
    X = squareform(pdist(pts, metric='cityblock'))
    if np.any(np.isnan(X)) or not np.all(np.isfinite(X)):
        return None
    X = StandardScaler().fit_transform(X)
    # print(X.ndim)
    # X = np.atleast_2d(X)
    dbkwargs = {
        'eps': epsilon,
        'min_samples': 1,
        'metric': euclidean_distances,
        # 'metric': 'haversine',
        'algorithm': 'brute',
        'leaf_size': None
    }
    db = DBSCAN(**dbkwargs).fit(X)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    clusters = []
    for k in set(labels):
        class_member_mask = (labels == k)
        if k == -1:
            # Noise
            continue
        _filter = class_member_mask if not core_only else class_member_mask & core_samples_mask
        _pts = list(map(Point, pts[_filter]))
        centroid = MultiPoint(_pts).centroid
        # NOTE(review): this condition can never be true here (empty inputs
        # return earlier and pts is unchanged), so the else branch always
        # runs. Was `len(_pts) == 1` intended? TODO confirm -- the else
        # branch handles the singleton case correctly anyway (distance 0).
        if len(pts) == 0 and _pts == centroid:
            centroid = _pts[0]
        else:
            # Snap the centroid to the cluster member nearest the geometric
            # centre (see Notes above).
            get_dist = lambda x: x.distance(centroid)
            distances = list(map(get_dist, _pts))
            centroid = _pts[distances.index(min(distances))]
        clusters.append(cluster_named_tuple()(
            label=k,
            centroid=centroid,
            location=location_callback(
                list(compress(location.locations, _filter)))))
    return clusters
def get_clusters(location, location_callback, method=dbscan, **kwargs):
    """Cluster *location* with the given *method* (default: dbscan).

    Extra keyword arguments are forwarded to the clustering method.
    """
    return method(location, location_callback, **kwargs)
def long_substr(data):
    """Find the longest substring common to every string in *data*.

    Returns None for empty input, the single string itself for a
    one-element sequence, and otherwise the longest common substring
    (whitespace-stripped; the earliest occurrence in data[0] wins ties).
    """
    if not data:
        return None
    if len(data) == 1:
        return data[0]
    reference = data[0]
    best = ''
    for start in range(len(reference)):
        # Only candidates strictly longer than the current best can win.
        for end in range(start + len(best) + 1, len(reference) + 1):
            candidate = reference[start:end]
            if all(candidate in text for text in data):
                best = candidate
    return best.strip()
def get_proj(epsg):
    """Returns a pyproj partial representing a projection from WGS84 to
    a given projection.

    Args:
        epsg: EPSG code for the target projection.

    Returns:
        functools.partial: callable transforming coordinates from
        EPSG:4326 into the target CRS.
    """
    # BUG FIX: the partial was previously assigned to a local and never
    # returned, so callers always received None.
    return partial(
        pyproj.transform,
        pyproj.Proj(init='EPSG:4326'),
        pyproj.Proj(init='EPSG:{epsg}'.format(epsg=epsg)))
| mit |
kmunve/pysenorge | pysenorge/io/bil.py | 1 | 5484 | __docformat__ = "reStructuredText"
'''
Binary (.bil) input/output class.
:Author: kmu
:Created: 14. okt. 2010
'''
# Built-in
import os, sys
sys.path.append(os.path.abspath('../..'))
# Additional
from numpy import zeros, fromfile, int8, int16, uint8, uint16, float32, nan
# Own
from pysenorge.converters import get_FillValue
class BILdata(object):
    '''
    Class for reading, writing and displaying BIL format files for
    *www.senorge.no*.

    The seNorge array has a standard size of height=1550, width=1195.
    The standard data-type is "uint16" with a no-data-value of 65535.
    The standard file name is of type "themename_YYYY_MM_DD.bil"
    '''

    def __init__(self, filename, datatype):
        '''
        Initializes defaults.

        :Parameters:
            - filename: path of the .bil file to read or write
            - datatype: name of a numpy dtype as a string, e.g. "uint16"
        '''
        self.nrows = 1550
        self.ncols = 1195
        # NOTE: eval() resolves the dtype name against the names imported at
        # module level -- never pass untrusted strings here.
        self.datatype = eval(datatype)
        self.nodata = get_FillValue(self.datatype)
        self.data = zeros((self.nrows, self.ncols), self.datatype)
        self.filename = filename

    def _set_dimension(self, nrows, ncols):
        '''
        Override standard dimensions.

        :Parameters:
            - nrows: Number of new rows
            - ncols: Number of new columns
        '''
        self.nrows = nrows
        self.ncols = ncols
        # Re-allocate the data buffer to match the new dimensions.
        self.data = zeros((self.nrows, self.ncols), self.datatype)

    def read(self):
        """
        Reads data from BIL file into self.data (reshaped to nrows x ncols).
        """
        print "Reading %s" % self.filename
        # self._read_hdr()
        fid = open(self.filename, "rb")
        tmpdata = fromfile(fid, self.datatype)
        fid.close()
        tmpdata.shape = (self.nrows, self.ncols)
        self.data[:] = tmpdata
        # self._get_mask()

    def write(self, data):
        '''
        Writes data to BIL file.

        :Parameters:
            - data: *numpy* array to be stored; its dtype must match the
              dtype this instance was created with.
        '''
        # Make sure data is in appropriate format
        if self.data.dtype == data.dtype:
            # Open and write to file
            fid = open(self.filename, 'wb')
            fid.write(data)
            fid.flush()
            fid.close()
            self.data = data
            return "Data written to %s" % self.filename
        else:
            # NOTE(review): on dtype mismatch this only prints and implicitly
            # returns None -- callers cannot distinguish success by truthiness
            # alone unless they check the return value.
            print "Inconsistent data-type for BIL format."

    def _read_hdr(self):
        """
        Reads header information from *.hdr* file (if existent).
        """
        hdr = os.path.splitext(self.filename)[0] + '.hdr'
        if os.path.exists(hdr):
            # Verify the information
            # NOTE(review): this file handle is never closed.
            fid = open(hdr, 'r')
            lines = fid.readlines()
            # NOTE(review): BYTEORDER is parsed with int() here, but
            # _write_hdr() stores sys.byteorder ("little"/"big"); reading a
            # header produced by _write_hdr() would raise ValueError.
            # TODO confirm the expected header format.
            byteorder = int(lines[0].split('\t')[1].strip())
            nrows = int(lines[2].split('\t')[1].strip())
            ncols = int(lines[3].split('\t')[1].strip())
            print "BYTEORDER: %i \nNROWS: %i \nNCOLS: %i\n" % (byteorder, nrows, ncols)
            self._set_dimension(nrows, ncols)
        else:
            print "No header data found! Using default..."

    def _write_hdr(self):
        '''
        Creates a I{.hdr} file to the corresponding I{.bil} file.

        Example content of .hdr file

        BYTEORDER      I
        LAYOUT       BIL
        NROWS         1550
        NCOLS         1195
        NBANDS        1
        NBITS         16
        BANDROWBYTES         2390
        TOTALROWBYTES        2390
        BANDGAPBYTES         0
        '''
        hdr = os.path.splitext(self.filename)[0] + '.hdr'
        fid = open(hdr, 'w')
        fid.write('BYTEORDER\t%s\n' % sys.byteorder)
        fid.write('LAYOUT\tBIL\n')
        fid.write('NROWS\t%i\n' % self.nrows)
        fid.write('NCOLS\t%i\n' % self.ncols)
        fid.write('NBANDS\t1\n')
        # NOTE(review): NBITS and the row-byte counts are hard-coded for the
        # default 16-bit, 1195-column grid; they are not derived from
        # self.datatype / self.ncols. TODO confirm before using custom sizes.
        fid.write('NBITS\t16\n')
        fid.write('BANDROWBYTES\t2390\n')
        fid.write('TOTALROWBYTES\t2390\n')
        fid.write('BANDGAPBYTES\t0\n')
        fid.flush()
        fid.close()

    def _view(self):
        """
        Plot the data contained in the BIL file.

        :Requires: *Matplotlib* module
        """
        try:
            import matplotlib.pyplot as plt
            from matplotlib.cm import jet
            from pysenorge.grid import senorge_mask

            mask = senorge_mask()
            # Mask the off-grid cells so they render as blanks (NaN).
            data = float32(self.data)
            data[mask] = nan
            plt.figure(facecolor='lightgrey')
            plt.imshow(data, interpolation='nearest', cmap=jet, alpha=1.0,
                       # vmin=-1, vmax=0
                       )
            plt.colorbar()
            plt.show()
        except ImportError:
            print '''Required plotting module "matplotlib" not found!\nVisit www.matplotlib.sf.net'''

    def _hist(self):
        """
        Histogram of the data contained in the BIL file.

        :Requires: *Matplotlib* module
        """
        try:
            import matplotlib.pyplot as plt
            plt.hist(self.data, 30, histtype='barstacked', align='left')
            plt.show()
        except ImportError:
            print '''Required plotting module "matplotlib" not found!\nVisit www.matplotlib.sf.net'''
if __name__ == "__main__":
pass | gpl-3.0 |
MonoCloud/zipline | zipline/utils/tradingcalendar_tse.py | 17 | 10125 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime, \
get_open_and_closes
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
    """Build a pandas DatetimeIndex of all TSX non-trading days between
    *start* and *end* (inclusive): weekends, statutory Canadian holidays
    and the September 2001 closure. Both endpoints are canonicalized
    first.
    """
    non_trading_rules = []

    start = canonicalize_datetime(start)
    end = canonicalize_datetime(end)

    weekends = rrule.rrule(
        rrule.YEARLY,
        byweekday=(rrule.SA, rrule.SU),
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(weekends)

    new_years = rrule.rrule(
        rrule.MONTHLY,
        byyearday=1,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(new_years)

    # New Year's observed on Monday Jan 2 when Jan 1 falls on a Sunday.
    new_years_sunday = rrule.rrule(
        rrule.MONTHLY,
        byyearday=2,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(new_years_sunday)

    # Matches Jan 3 Mondays, i.e. the Monday observance when Jan 1 falls on
    # a Saturday.
    new_years_saturday = rrule.rrule(
        rrule.MONTHLY,
        byyearday=3,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(new_years_saturday)

    # Family day in Ontario, starting in 2008, third monday of February
    family_day = rrule.rrule(
        rrule.MONTHLY,
        bymonth=2,
        byweekday=(rrule.MO(3)),
        cache=True,
        dtstart=datetime(2008, 1, 1, tzinfo=pytz.utc),
        until=end
    )
    non_trading_rules.append(family_day)

    good_friday = rrule.rrule(
        rrule.DAILY,
        byeaster=-2,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(good_friday)

    # Monday prior to May 25th.
    victoria_day = rrule.rrule(
        rrule.MONTHLY,
        bymonth=5,
        byweekday=rrule.MO,
        bymonthday=[24, 23, 22, 21, 20, 19, 18],
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(victoria_day)

    # Canada Day, plus Monday observances when July 1 falls on a weekend.
    july_1st = rrule.rrule(
        rrule.MONTHLY,
        bymonth=7,
        bymonthday=1,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(july_1st)

    july_1st_sunday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=7,
        bymonthday=2,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(july_1st_sunday)

    july_1st_saturday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=7,
        bymonthday=3,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(july_1st_saturday)

    # First Monday of August.
    civic_holiday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=8,
        byweekday=rrule.MO(1),
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(civic_holiday)

    labor_day = rrule.rrule(
        rrule.MONTHLY,
        bymonth=9,
        byweekday=(rrule.MO(1)),
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(labor_day)

    # Canadian Thanksgiving: second Monday of October.
    thanksgiving = rrule.rrule(
        rrule.MONTHLY,
        bymonth=10,
        byweekday=(rrule.MO(2)),
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(thanksgiving)

    christmas = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=25,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(christmas)

    # If Christmas is a Sunday then the 26th, a Monday is observed.
    # (but that would be boxing day), so the 27th is also observed.
    christmas_sunday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=27,
        byweekday=rrule.TU,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(christmas_sunday)

    # If Christmas is a Saturday then the 27th, a monday is observed.
    christmas_saturday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=27,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(christmas_saturday)

    boxing_day = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=26,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(boxing_day)

    # if boxing day is a sunday, the Christmas was saturday.
    # Christmas is observed on the 27th, a monday, and boxing day is observed
    # on the 28th, a tuesday.
    boxing_day_sunday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=28,
        byweekday=rrule.TU,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(boxing_day_sunday)

    # If boxing day is a Saturday then the 28th, a monday is observed.
    boxing_day_saturday = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=28,
        byweekday=rrule.MO,
        cache=True,
        dtstart=start,
        until=end
    )
    non_trading_rules.append(boxing_day_saturday)

    # Merge every rule into a single set and materialize the dates.
    non_trading_ruleset = rrule.rruleset()

    for rule in non_trading_rules:
        non_trading_ruleset.rrule(rule)

    non_trading_days = non_trading_ruleset.between(start, end, inc=True)

    # Add September 11th closings
    # The TSX was open for 71 minutes on September 11, 2001.
    # It was closed on the 12th and reopened on the 13th.
    # http://www.cbc.ca/news2/interactives/map-tsx/
    #
    #    September 2001
    # Su Mo Tu We Th Fr Sa
    #                    1
    #  2  3  4  5  6  7  8
    #  9 10 11 12 13 14 15
    # 16 17 18 19 20 21 22
    # 23 24 25 26 27 28 29
    # 30

    non_trading_days.append(
        datetime(2001, 9, 12, tzinfo=pytz.utc))

    non_trading_days.sort()
    return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
    """Return a UTC-localized DatetimeIndex of TSX trading sessions between
    *start* and *end*, using *trading_day* as the custom business-day
    frequency."""
    sessions = pd.date_range(start=start.date(),
                             end=end.date(),
                             freq=trading_day)
    return sessions.tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Days in Environment but not in Calendar (using ^GSPTSE as bm_symbol):
# --------------------------------------------------------------------
# Used http://web.tmxmoney.com/pricehistory.php?qm_page=61468&qm_symbol=^TSX
# to check whether exchange was open on these days.
# 1994-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1996-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1996-08-05 - Civic Holiday, Yahoo Finance has Volume = 0
# 1997-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1997-08-04 - Civic Holiday, Yahoo Finance has Volume = 0
# 2001-05-21 - Victoria day, Yahoo Finance has Volume = 0
# 2004-10-11 - Closed, Thanksgiving - Confirmed closed
# 2004-12-28 - Closed, Boxing Day - Confirmed closed
# 2012-10-08 - Closed, Thanksgiving - Confirmed closed
# Days in Calendar but not in Environment using ^GSPTSE as bm_symbol:
# --------------------------------------------------------------------
# Used http://web.tmxmoney.com/pricehistory.php?qm_page=61468&qm_symbol=^TSX
# to check whether exchange was open on these days.
# 2000-06-28 - No data this far back, can't confirm
# 2000-08-28 - No data this far back, can't confirm
# 2000-08-29 - No data this far back, can't confirm
# 2001-09-11 - TSE Open for 71 min.
# 2002-02-01 - Confirm TSE Open
# 2002-06-14 - Confirm TSE Open
# 2002-07-02 - Confirm TSE Open
# 2002-11-11 - TSX website has no data for 2 weeks in 2002
# 2003-07-07 - Confirm TSE Open
# 2003-12-16 - Confirm TSE Open
def get_early_closes(start, end):
    """Return a DatetimeIndex of TSX early-close sessions between *start*
    and *end*: weekday December 24ths from 1993 onwards."""
    # TSX closed at 1:00 PM on december 24th.

    start = canonicalize_datetime(start)
    end = canonicalize_datetime(end)

    start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
    # NOTE(review): clamping *end* upwards to 1993 (rather than leaving it
    # alone) means a pre-1993 range still scans 1993 -- presumably to avoid
    # an inverted range, but confirm this is intentional.
    end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))

    # Not included here are early closes prior to 1993
    # or unplanned early closes

    early_close_rules = []

    # Christmas Eve, weekdays only.
    christmas_eve = rrule.rrule(
        rrule.MONTHLY,
        bymonth=12,
        bymonthday=24,
        byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),
        cache=True,
        dtstart=start,
        until=end
    )
    early_close_rules.append(christmas_eve)

    early_close_ruleset = rrule.rruleset()
    for rule in early_close_rules:
        early_close_ruleset.rrule(rule)
    early_closes = early_close_ruleset.between(start, end, inc=True)

    early_closes.sort()
    return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
    """Return the (open, close) UTC timestamps for the TSX session *day*.

    The market opens at 9:31 AM US/Eastern. It closes at 1 PM Eastern when
    *day* is a member of *early_closes*, and at 4 PM otherwise.
    """
    session = dict(year=day.year, month=day.month, day=day.day)

    market_open = pd.Timestamp(
        datetime(hour=9, minute=31, **session),
        tz='US/Eastern').tz_convert('UTC')

    # 1 PM if early close, 4 PM otherwise
    if day in early_closes:
        close_hour = 13
    else:
        close_hour = 16

    market_close = pd.Timestamp(
        datetime(hour=close_hour, **session),
        tz='US/Eastern').tz_convert('UTC')

    return market_open, market_close
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
Paul-St-Young/share | algorithms/iso3d/hf/chf.py | 1 | 3892 | #!/usr/bin/env python
import os
import numpy as np
def show_moR(moR):
    """Plot isosurfaces of selected molecular orbitals on a 2x2 subplot grid.

    NOTE(review): relies on module-level globals assigned in the __main__
    block (mo_to_plot, dgs, axes, grid_shape, up_dn_map, default_up,
    default_dn, pos, alat0), so it can only be called after that setup runs.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D # enable 3D projection
    from qharv.inspect import volumetric,crystal
    fig = plt.figure()
    iplot = 0
    for iorb in mo_to_plot:
        iplot += 1
        # Orbital values on the real-space grid: (2*dgs+1) points per axis.
        val = moR[:,iorb].reshape(2*dgs+1)
        fval= volumetric.spline_volumetric(val)
        grid= volumetric.axes_func_on_grid3d(axes,fval,grid_shape)
        # Per-orbital isosurface levels, with a global fallback.
        myup = default_up
        mydn = default_dn
        if iorb in up_dn_map.keys():
            myup = up_dn_map[iorb]['up']
            mydn = up_dn_map[iorb]['dn']
        # end if
        ax = fig.add_subplot(2,2,iplot,projection='3d')
        crystal.draw_cell(ax,axes*grid_shape/alat0,pos*grid_shape/alat0,'y')
        ax.set_title('orb %d' % iorb)
        # Negative levels suppress the corresponding isosurface.
        if myup >0:
            meshm = volumetric.isosurf(ax,grid,level_frac=myup)
            meshm.set_facecolor('#3498db')
            meshm.set_edgecolor('#34495e')
        if mydn >0:
            meshp = volumetric.isosurf(ax,grid,level_frac=mydn)
            meshp.set_facecolor('#fb9a99')
            meshp.set_edgecolor('#e11a1c')
    # end for
    plt.show()
if __name__ == '__main__':
    import sys
    sys.path.insert(0,'../basis')
    from basis import bfd_basis
    from pyscf.pbc import gto
    from pyscf.pbc.scf import RHF
    from qharv.cross import pqscf
    # Restricted Hartree-Fock for diamond-structure carbon (2-atom primitive
    # cell) with a BFD pseudopotential, followed by visualization of selected
    # molecular orbitals and the electron density.
    mygs = 16 # grid density
    # grid density for visualization
    myvgs = 6
    vgs = np.array([myvgs]*3)
    grid_shape = 2*vgs + 1
    # define isosurface levels
    default_up = 0.75
    default_dn = 0.25
    # per-orbital overrides of the isosurface levels; a non-positive value
    # suppresses that surface (e.g. 'dn':-.1 hides the negative lobe of orb 0)
    up_dn_map = {
        0:{'up':0.9,'dn':-.1},
        1:{'up':0.6,'dn':0.3},
        3:{'up':0.65,'dn':0.3},
    }
    # file names used as on-disk caches for the SCF solution, the molecular
    # orbitals on the grid, and the density on the grid
    chkfile_fname = 'bfd.h5'
    moR_fname = 'moR.dat'
    rho_fname = 'rho.dat'
    alat0 = 3.6  # cubic lattice constant; FCC primitive vectors built below
    axes = (np.ones((3,3))-np.eye(3))*alat0/2.0
    elem = ['C','C']
    pos = np.array([[0,0,0],[0.5,0.5,0.5]])*alat0
    atoms = pqscf.atom_text(elem,pos)
    gs = np.array([mygs]*3)
    basis = bfd_basis()
    cell = gto.M(a=axes,atom=atoms,verbose=3
        ,gs=gs,pseudo={'C':'bfd'},basis=basis)
    mf = RHF(cell)
    mf.chkfile = chkfile_fname
    mf.conv_tol = 1e-6
    # run or load RHF: reuse the checkpoint file if a previous run left one
    if os.path.isfile(chkfile_fname):
        from pyscf import lib
        mf.__dict__.update(lib.chkfile.load(chkfile_fname,'scf'))
    else:
        mf.kernel()
    # end if
    # grid density for molecular orbital
    mydgs = 16
    dgs = np.array([mydgs]*3)
    moR_fname = 'gs%d_'%mydgs+moR_fname
    # run or load moR: evaluate AOs on a uniform grid and rotate into the MO
    # basis, caching the result as a text file keyed by the grid density
    if os.path.isfile(moR_fname):
        moR = np.loadtxt(moR_fname)
    else:
        from pyscf.pbc.gto.cell import gen_uniform_grids
        from pyscf.pbc.dft.numint import eval_ao
        coords = gen_uniform_grids(cell,gs=dgs)
        aoR = eval_ao(cell,coords)
        moR = np.dot(aoR,mf.mo_coeff)
        np.savetxt(moR_fname,moR)
    # end if
    # orbitals to visualize; show_moR draws a fixed 2x2 panel, hence exactly 4
    mo_to_plot = [0,1,3,4]
    assert len(mo_to_plot) == 4
    #show_moR(moR)
    # add up occupied orbitals for electron density
    rho_fname = 'gs%d_'%mydgs+rho_fname
    if os.path.isfile(rho_fname):
        rho = np.loadtxt(rho_fname)
    else:
        from pyscf.pbc.gto.cell import gen_uniform_grids
        from pyscf.pbc.dft.numint import eval_ao
        coords = gen_uniform_grids(cell,gs=dgs)
        aoR = eval_ao(cell,coords)
        dm = mf.make_rdm1(mf.mo_coeff, mf.mo_occ)
        # NOTE(review): only the diagonal of the density matrix is used here,
        # i.e. cross terms between AOs are dropped -- confirm this
        # approximation is intended before reusing the density quantitatively.
        rho = np.dot(aoR, np.diag(dm) )
        np.savetxt(rho_fname,rho)
    # end if
    # draw two isosurfaces of the density inside the cell
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # enable 3D projection
    from qharv.inspect import volumetric,crystal
    fig = plt.figure()
    fval= volumetric.spline_volumetric(rho.reshape(2*dgs+1) )
    grid= volumetric.axes_func_on_grid3d(axes,fval,grid_shape)
    ax = fig.add_subplot(1,1,1,projection='3d')
    crystal.draw_cell(ax,axes*grid_shape/alat0,pos*grid_shape/alat0,'y')
    meshm = volumetric.isosurf(ax,grid,level_frac=0.3)
    meshm.set_facecolor('#3498db')
    meshm.set_edgecolor('#34495e')
    meshp = volumetric.isosurf(ax,grid,level_frac=0.75)
    meshp.set_facecolor('#fb9a99')
    meshp.set_edgecolor('#e11a1c')
    plt.show()
# end __main__
| mit |
LouisePaulDelvaux/openfisca-france-data | openfisca_france_data/input_data_builders/build_openfisca_indirect_taxation_survey_data/step_0_4_homogeneisation_revenus_menages.py | 1 | 15961 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pandas
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.temporary import TemporaryStore
log = logging.getLogger(__name__)
temporary_store = TemporaryStore.create(file_name = "indirect_taxation_tmp")
def build_homogeneisation_revenus_menages(year = None):
    """Homogenize household income data and build a proxy of disposable income.

    Loads the 'budget des familles' survey for the given ``year`` (1995, 2000,
    2005 or 2011), harmonizes the income variable names across vintages, and
    stores the resulting menage-level dataframe (including ``rev_disponible``
    and ``rev_disp_loyerimput``) in ``temporary_store['revenus_<year>']``.

    :param year: survey year to process (required)
    """
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection = 'budget_des_familles', config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    # **********************************************************************************************************************
    # ********************************* HOMOGENEISATION DES DONNEES SUR LES REVENUS DES MENAGES ****************************
    # ************************************ CALCUL D'UN PROXI DU REVENU DISPONIBLE DES MENAGES ******************************
    # **********************************************************************************************************************
    #
    # ********************HOMOGENEISATION DES BASES DE RESSOURCES***************************
    # /* La base 95 permet de distinguer taxe d'habitation et impôts fonciers. On calcule leur montant relatif pour l'appliquer à 00 et 05 */
    if year == 1995:
        menrev = survey.get_values(
            table = "menrev",
            variables = [
                'revtot', 'ir', 'irbis', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revret',
                'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'revpat', 'mena', 'ponderr'
                ],
            )
        menage = survey.get_values(
            table = "socioscm",
            variables = ['exdep', 'exrev', 'mena']
            )
        # BUG FIX: DataFrame.set_index returns a *new* frame; the original
        # call discarded the result, so the merge below aligned on positional
        # RangeIndex instead of the household identifier.  Index both frames
        # on 'mena' (as done for rev_disp further down) before merging.
        menrev.set_index('mena', inplace = True)
        menage.set_index('mena', inplace = True)
        menrev = menrev.merge(menage, left_index = True, right_index = True)
        # cette étape de ne garder que les données dont on est sûr de la qualité et de la véracité
        # exdep = 1 si les données sont bien remplies pour les dépenses du ménage
        # exrev = 1 si les données sont bien remplies pour les revenus du ménage
        menrev = menrev[(menrev.exdep == 1) & (menrev.exrev == 1)]
        menrev['foncier_hab'] = menrev.imphab + menrev.impfon
        menrev['part_IMPHAB'] = menrev.imphab / menrev.foncier_hab
        menrev['part_IMPFON'] = menrev.impfon / menrev.foncier_hab
        menrev['revsoc'] = (
            menrev.revret + menrev.revcho + menrev.revfam + menrev.revlog + menrev.revinv + menrev.revrmi
            )
        for variable in ['revcho', 'revfam', 'revinv', 'revlog', 'revret', 'revrmi']:
            del menrev[variable]
        menrev['revact'] = menrev['revsal'] + menrev['revind'] + menrev['revsec']
        menrev.rename(
            columns = dict(
                revpat = "revpat",
                impfon = "impfon",
                imphab = "imphab",
                revaid = "somme_obl_recue",
                ),
            inplace = True
            )
        menrev['impot_revenu'] = menrev['ir'] + menrev['irbis']
        rev_disp = survey.get_values(
            table = "menrev",
            variables = ['revtot', 'revret', 'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revpat', 'mena', 'ponderr', 'ir','irbis' ],
            )
        rev_disp.set_index('mena', inplace=True)
        menage2 = survey.get_values(
            table = "socioscm",
            variables = ['exdep', 'exrev', 'mena']
            )
        menage2.set_index('mena', inplace = True)
        rev_disp = menage2.merge(rev_disp, left_index = True, right_index = True)
        rev_disp = rev_disp[(rev_disp.exrev == 1) & (rev_disp.exdep == 1)]
        rev_disp['revsoc'] = rev_disp['revret'] + rev_disp['revcho'] + rev_disp['revfam'] + rev_disp['revlog'] + rev_disp['revinv'] + rev_disp['revrmi']
        rev_disp['impot_revenu'] = rev_disp['ir'] + rev_disp['irbis']
        rev_disp.rename(
            columns = dict(
                revaid = 'somme_obl_recue',
                ),
            inplace = True
            )
        rev_disp.somme_obl_recue = rev_disp.somme_obl_recue.fillna(0)
        rev_disp['revact'] = rev_disp['revsal'] + rev_disp['revind'] + rev_disp['revsec']
        rev_disp['revtot'] = rev_disp['revact'] + rev_disp['revpat'] + rev_disp['revsoc'] + rev_disp['somme_obl_recue']
        rev_disp.rename(
            columns = dict(
                ponderr = "pondmen",
                mena = "ident_men",
                revind = "act_indpt",
                revsal = "salaires",
                revsec = "autres_rev",
                ),
            inplace = True
            )
        # variables absent from the 1995 vintage; filled with '0' placeholders
        rev_disp['autoverses'] = '0'
        rev_disp['somme_libre_recue'] = '0'
        rev_disp['autres_ress'] = '0'
        #
        # /* Le revenu disponible se calcule à partir de revtot à laquelle on retranche la taxe d'habitation
        # et l'impôt sur le revenu, plus éventuellement les CSG et CRDS.
        # La variable revtot est la somme des revenus d'activité, sociaux, du patrimoine et d'aide. */
        #
        rev_disp['rev_disponible'] = rev_disp.revtot - rev_disp.impot_revenu - rev_disp.imphab
        loyers_imputes = temporary_store['depenses_bdf_{}'.format(year)]
        loyers_imputes.rename(
            columns = {"0411": "loyer_impute"},
            inplace = True,
            )
        rev_dispbis = loyers_imputes.merge(rev_disp, left_index = True, right_index = True)
        rev_disp['rev_disp_loyerimput'] = rev_disp['rev_disponible'] - rev_dispbis['loyer_impute']
        # * CONVERSION EN EUROS (amounts are in francs; 1 EUR = 6.55957 FRF)
        for var in ['somme_obl_recue', 'act_indpt', 'revpat', 'salaires', 'autres_rev', 'rev_disponible', 'impfon', 'imphab', 'revsoc', 'revact', 'impot_revenu', 'revtot', 'rev_disp_loyerimput'] :
            rev_disp[var] = rev_disp[var] / 6.55957
        temporary_store["revenus_{}".format(year)] = rev_disp
    elif year == 2000:
        # TODO: récupérer plutôt les variables qui viennent de la table dépenses (dans temporary_store)
        consomen = survey.get_values(
            table = "consomen",
            variables = ['c13141', 'c13111', 'c13121', 'c13131', 'pondmen', 'ident'],
            )
        rev_disp = consomen.sort(columns = ['ident'])
        del consomen
        menage = survey.get_values(
            table = "menage",
            variables = ['ident', 'revtot', 'revact', 'revsoc', 'revpat', 'rev70', 'rev71', 'revt_d', 'pondmen', 'rev10', 'rev11', 'rev20', 'rev21'],
            ).sort(columns = ['ident'])
        revenus = menage.join(rev_disp, how = "outer", rsuffix = "rev_disp")
        revenus.rename(
            columns = dict(
                c13111 = "impot_res_ppal",
                c13141 = "impot_revenu",
                c13121 = "impot_autres_res",
                rev70 = "somme_obl_recue",
                rev71 = "somme_libre_recue",
                revt_d= "autres_ress",
                ident = "ident_men",
                rev10 = "act_indpt",
                rev11 = "autoverses",
                rev20 = "salaires",
                rev21 = "autres_rev",
                ),
            inplace = True
            )
        # 'pondmenrev_disp' is the rev_disp copy of 'pondmen' created by the
        # rsuffix of the join above
        var_to_ints = ['pondmen','impot_autres_res','impot_res_ppal','pondmenrev_disp','c13131']
        for var_to_int in var_to_ints:
            revenus[var_to_int] = revenus[var_to_int].astype(int)
        # Ces pondérations (0.65 / 0.35) viennent de l'enquête BdF 1995, qui
        # distingue taxe d'habitation et impôts fonciers.
        revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
        variables = ["0421"]
        loyers_imputes = loyers_imputes[variables]
        loyers_imputes.rename(
            columns = {"0421": "loyer_impute"},
            inplace = True,
            )
        temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
        loyers_imputes.index = loyers_imputes.index.astype('int')
        revenus = revenus.set_index('ident_men')
        revenus.index = revenus.index.astype('int')
        revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
        revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
        # clip negative disposable incomes at zero
        revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
        revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
        var_to_ints = ['loyer_impute']
        for var_to_int in var_to_ints:
            revenus[var_to_int] = revenus[var_to_int].astype(int)
        temporary_store["revenus_{}".format(year)] = revenus
    elif year == 2005:
        c05d = survey.get_values(
            table = "c05d",
            variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_men'],
            )
        rev_disp = c05d.sort(columns = ['ident_men'])
        del c05d
        menage = survey.get_values(
            table = "menage",
            variables = ['ident_men', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700_d', 'rev701_d',
                'rev999_d', 'rev100_d', 'rev101_d', 'rev200_d', 'rev201_d'],
            ).sort(columns = ['ident_men'])
        rev_disp.set_index('ident_men', inplace = True)
        menage.set_index('ident_men', inplace = True)
        revenus = pandas.concat([menage, rev_disp], axis = 1)
        revenus.rename(
            columns = dict(
                rev100_d = "act_indpt",
                rev101_d = "autoverses",
                rev200_d = "salaires",
                rev201_d = "autres_rev",
                rev700_d = "somme_obl_recue",
                rev701_d = "somme_libre_recue",
                rev999_d = "autres_ress",
                c13111 = "impot_res_ppal",
                c13141 = "impot_revenu",
                c13121 = "impot_autres_res",
                ),
            inplace = True
            )
        # * Ces pondérations (0.65 0.35) viennent de l'enquête BdF 1995 qui distingue taxe d'habitation et impôts fonciers. A partir de BdF 1995,
        # * on a calculé que la taxe d'habitation représente en moyenne 65% des impôts locaux, et que les impôts fonciers en représentent 35%.
        # * On applique ces taux aux enquêtes 2000 et 2005.
        revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        del revenus['impot_autres_res']
        del revenus['impot_res_ppal']
        # * Calculer le revenu disponible avec et sans le loyer imputé
        loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
        variables = ["0421"]
        loyers_imputes = loyers_imputes[variables]
        loyers_imputes.rename(
            columns = {"0421": "loyer_impute"},
            inplace = True,
            )
        temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
        revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
        revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
        revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
        revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
        temporary_store["revenus_{}".format(year)] = revenus
    elif year == 2011:
        # table names changed case between releases of the 2011 survey, hence
        # the fallbacks below (previously bare except:, narrowed to Exception)
        try:
            c05 = survey.get_values(
                table = "C05",
                variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
                )
        except Exception:
            c05 = survey.get_values(
                table = "c05",
                variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
                )
        rev_disp = c05.sort(columns = ['ident_me'])
        del c05
        try:
            menage = survey.get_values(
                table = "MENAGE",
                variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
                ).sort(columns = ['ident_me'])
        except Exception:
            menage = survey.get_values(
                table = "menage",
                variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
                ).sort(columns = ['ident_me'])
        # variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'rev101_d', 'salaires', 'rev201'],
        rev_disp.set_index('ident_me', inplace = True)
        menage.set_index('ident_me', inplace = True)
        revenus = pandas.concat([menage, rev_disp], axis = 1)
        revenus.rename(
            columns = dict(
                revindep = "act_indpt",
                #TODO: trouver ces revenus commentés dans bdf 2011
                # rev101_d = "autoverses",
                salaires = "salaires",
                # rev201_d = "autres_rev",
                rev700 = "somme_obl_recue",
                rev701 = "somme_libre_recue",
                rev999 = "autres_ress",
                c13111 = "impot_res_ppal",
                c13141 = "impot_revenu",
                c13121 = "impot_autres_res",
                ),
            inplace = True
            )
        revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
        del revenus['impot_autres_res']
        del revenus['impot_res_ppal']
        loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
        variables = ["0421"]
        loyers_imputes = loyers_imputes[variables]
        loyers_imputes.rename(
            columns = {"0421": "loyer_impute"},
            inplace = True,
            )
        temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
        revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
        revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
        revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
        revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
        temporary_store["revenus_{}".format(year)] = revenus
if __name__ == '__main__':
    import sys
    import time
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; time.time() gives a wall-clock duration on both Python 2 and 3.
    deb = time.time()
    year = 2000
    build_homogeneisation_revenus_menages(year = year)
    log.info("step_0_4_homogeneisation_revenus_menages duration is {}".format(time.time() - deb))
| agpl-3.0 |
KirstieJane/BrainsForPublication | scripts/show_cluster_in_volume.py | 3 | 18141 | #!/usr/bin/env python
#=============================================================================
# Created by Michael Notter
# at OHBM 2016 Brainhack in Lausanne, June 2016
# Edited with more comments by Kirstie Whitaker
# at Cambridge Brainhack-Global 2017, March 2017
# Contact: kw401@cam.ac.uk
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import argparse
import future # pip install future
from glob import glob as gg
import os
from os.path import join as opj
from os.path import basename as opb
import sys
import textwrap
import numpy as np
from matplotlib import pylab
from matplotlib import pyplot as plt
import nibabel as nb
import nilearn
from nipy.labs import viz
from scipy.ndimage import label as sci_label
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser(argv=None):
    '''
    Build the command line parser and parse the arguments.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to ``sys.argv[1:]`` (argparse's
        default); exposed as a parameter so the parser can be tested.

    Returns
    -------
    (argparse.Namespace, argparse.ArgumentParser)
    '''
    # Build a basic parser.
    help_text = ('Show the locations of clusters in a statistical map in MNI space.')

    sign_off = 'Author: Kirstie Whitaker <kw401@cam.ac.uk>'

    parser = argparse.ArgumentParser(description=help_text,
                                     epilog=sign_off,
                                     formatter_class=argparse.RawTextHelpFormatter)

    # Now add the arguments
    parser.add_argument(dest='stats_file',
                        type=str,
                        metavar='stats_file',
                        help=textwrap.dedent('3D nifti file in MNI space containing the statistical values\n ' +
                                             'you want to visualise.\n' +
                                             'Note that this file can be pre-thresholded or not.\n' +
                                             'If your file is already thresholded then you will need to\n ' +
                                             'pass an argument to the -t option otherwise it will default to 2.3.\n ' +
                                             'A suggested value is 0.01.' ))

    # BUG FIX: short and long option strings must be separate arguments
    # ('-ce', '--cluster_extent'), not one comma-joined string -- otherwise
    # argparse creates a single malformed option and the `cluster_extent`
    # attribute the rest of the script reads never exists.
    # BUG FIX: the value is documented as an integer, so parse it as one
    # (it was type=str, which breaks the numeric comparisons downstream).
    parser.add_argument('-ce', '--cluster_extent',
                        type=int,
                        metavar='cluster_extent',
                        help=textwrap.dedent("Minimum cluster extent for a region to be included in the visualisation\n (integer)\n Default: 20"),
                        default=20)

    # BUG FIX: same option-string fix as above, and the threshold is a float.
    parser.add_argument('-t', '--cluster_thr',
                        type=float,
                        metavar='threshold',
                        help=textwrap.dedent("Minimum statistical value for a region to be included in the visualisation\n (float)\n Default: 2.3"),
                        default=2.3)

    parser.add_argument('--csv',
                        action='store_true',
                        help=textwrap.dedent('Create a csv file with cluster information.\n Default: False'),
                        default=False)

    parser.add_argument('--cluster_title',
                        action='store_true',
                        help=textwrap.dedent('Show cluster information in the title of the plot.\n Default: False'),
                        default=False)

    # NOTE: help text corrected to match the actual default ('hot').
    parser.add_argument('-c', '--cmap',
                        type=str,
                        metavar='cmap',
                        help=textwrap.dedent('Any matplotlib colormap listed at\n http://matplotlib.org/examples/color/colormaps_reference.html\n Default: hot'),
                        default='hot')

    parser.add_argument('-cb', '--cbar',
                        action='store_true',
                        help=textwrap.dedent('Display a colorbar on the right of the plots\n Default: False'),
                        default=False)

    parser.add_argument('--black_bg',
                        action='store_true',
                        help=textwrap.dedent('Set the background to black.\n Default: White'),
                        default=False)

    parser.add_argument('--dpi',
                        type=float,
                        metavar='dpi',
                        help='DPI of output png file\n Default: 300',
                        default=300)

    # BUG FIX: the format is a string such as 'png' or 'pdf'; type=float made
    # any explicit --format value crash the parser.
    parser.add_argument('--format',
                        type=str,
                        metavar='format',
                        help=textwrap.dedent('Format of the output image file.\n Eg: png, pdf, tif, jpeg, svg. \n Default: png'),
                        default='png')

    arguments = parser.parse_args(argv)

    return arguments, parser
def get_labels(data, cluster_thr=0, min_extent=0):
    """Label the suprathreshold clusters of *data*.

    Voxels whose absolute value exceeds ``cluster_thr`` are grouped into
    connected components; components smaller than ``min_extent`` voxels are
    discarded.  ``data`` is modified in place: voxels outside the surviving
    clusters are zeroed.

    Returns ``(labels, nlabels, data, binarized_data)`` where ``labels`` is
    the labeled volume, ``nlabels`` the number of surviving clusters, and
    ``binarized_data`` a boolean mask of all surviving clusters.
    """
    # initial labeling of everything above the voxel-wise threshold
    suprathreshold = abs(data) > cluster_thr
    labels, nlabels = sci_label(suprathreshold)

    # drop components that do not reach the minimum extent
    for cluster_id in range(1, nlabels + 1):
        cluster_mask = labels == cluster_id
        if np.sum(cluster_mask) < min_extent:
            labels[cluster_mask] = 0

    # relabel so ids are contiguous over the surviving clusters only
    labels, nlabels = sci_label(labels)

    # zero out the input everywhere outside the surviving clusters
    binarized_data = labels.astype('bool')
    data[~binarized_data] = 0

    return labels, nlabels, data, binarized_data
def get_cluster_info(img, affine, data):
    """Summarize each labeled cluster of a statistical volume.

    ``img`` is a labeled volume (0 = background), ``affine`` the voxel-to-MNI
    transform, and ``data`` the statistical values.  Clusters are ordered by
    descending size.

    Returns ``(coords, clusterInfo)``: the peak coordinate (in affine/MNI
    space) of every cluster, and an array whose columns are
    X, Y, Z, Size, Max, Min, Mean, Std.
    """
    sizes = []            # cluster volumes (voxel counts)
    peak_voxels = []      # voxel indices of each cluster's peak
    stats_rows = []       # per-cluster [size, max, min, mean, std]

    # every label id present in the volume, excluding background
    label_ids = np.setdiff1d(np.unique(img.ravel()), [0])

    for label_id in label_ids:
        in_cluster = img == label_id
        size = np.sum(in_cluster)
        sizes.append(size)

        cluster_values = data[in_cluster]
        peak_value = np.max(cluster_values)
        stats_rows.append([size, peak_value, np.min(cluster_values),
                           np.mean(cluster_values), np.std(cluster_values)])

        # voxel index of the peak (first match along each axis)
        peak_idx = np.nonzero(np.multiply(data, in_cluster) == peak_value)
        peak_voxels.append([axis[0] for axis in peak_idx])

    # order clusters by descending volume
    order = np.argsort(sizes)[::-1]
    peak_voxels = np.asarray(peak_voxels)[order, :]
    stats_rows = np.asarray(stats_rows)[order, :]

    # map the peak voxel indices through the affine into output space
    coords = [np.dot(affine, np.hstack((voxel, 1)))[:3].tolist()
              for voxel in peak_voxels]

    clusterInfo = np.hstack((np.array(coords), stats_rows))

    return coords, clusterInfo
def show_slices(data, affine,
                coords=None,
                cmap=None,
                show_colorbar=None,
                showCross=False,
                cluster_thr=0,
                annotate=True,
                template='../scripts/templates/MNI152_T1_1mm_brain.nii.gz',
                dpiRes=300,
                suffix='png',
                show_title=False):
    """Save one ortho-view figure per cluster, sliced at its peak coordinate.

    NOTE(review): this function reads the module-level globals
    ``cluster_extent`` and ``output_folder`` set by the CLI section --
    TODO pass them in explicitly.
    """
    # Prepare background image
    anatimg = nb.load(template)
    # BUG FIX: in nibabel `affine` is a property, not a method -- calling it
    # raised a TypeError
    anatdata, anataff = anatimg.get_data(), anatimg.affine
    # BUG FIX: np.float is a deprecated alias of the builtin float
    anatdata = anatdata.astype(float)
    anatdata[anatdata < 10.] = np.nan

    # Create output figure for each peak coordinate
    # (so a different figure for each cluster)
    for idx, coord in enumerate(coords):

        # Name the output file to include the cluster id,
        # the cluster threshold and the minimum cluster extent.
        # BUG FIX: the format spec was '{:03:0f}', which is invalid and
        # raised a ValueError on the first iteration.
        outfile = 'Cluster_{}_thr{:04.2f}_minext{:03.0f}'.format(
            idx, cluster_thr, cluster_extent)

        # If show_title has been set then print the file name and the peak
        # coordinates in the title of the figure.
        # BUG FIX: outfile and coord are two separate format arguments;
        # 'outfile + coord' was a str + list TypeError.
        if show_title:
            title = '{} {}'.format(outfile, coord)
        else:
            title = ''

        # Plot three orthogonal views of the cluster sliced through the
        # peak coordinate
        osl = viz.plot_map(
            np.asarray(data), affine, anat=anatdata, anat_affine=anataff,
            threshold=cluster_thr, cmap=cmap, annotate=annotate,
            black_bg=False, cut_coords=coord, draw_cross=showCross,
            slicer='ortho', title=title)

        # If the show colorbar option is true then show the color bar on the
        # right hand side of the image
        if show_colorbar:
            cbarLocation = [-0.1, 0.2, 0.015, 0.6]
            im = plt.gca().get_images()[1]
            cb = plt.colorbar(im, cax=plt.axes(cbarLocation),
                              orientation='horizontal', format='%.2f')
            cb.set_ticks([cb._values.min(), cb._values.max()])

        # Save the figure!
        osl.frame_axes.figure.savefig(
            opj(output_folder, '{}.{}'.format(outfile, suffix)),
            dpi=dpiRes, bbox_inches='tight', transparent=True)

        # DONE! Close the plot
        plt.close()
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse.  This runs at import time because the
# file doubles as a script; see also the __main__ guard at the bottom.
arguments, parser = setup_argparser()

stats_file = arguments.stats_file
cluster_extent = arguments.cluster_extent
cluster_thr = arguments.cluster_thr
store_csv = arguments.csv
cluster_title = arguments.cluster_title
cmap = arguments.cmap
show_colorbar = arguments.cbar
dpi = arguments.dpi
image_format = arguments.format

#=============================================================================
# Get the colormap from matplotlib
#=============================================================================
# BUG FIX: `cm` was never imported, so this section raised a NameError on
# import; bring it in from matplotlib (already a dependency of this script).
from matplotlib import cm

if hasattr(cm, cmap):
    cmap = getattr(cm, cmap)

#=============================================================================
# Create the output folder
#=============================================================================
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])

# BUG FIX: os.isdir and os.path.makedirs do not exist; the correct calls are
# os.path.isdir and os.makedirs.
if not os.path.isdir(output_folder):
    os.makedirs(output_folder)
def create_output(stats_file, cluster_extent, threshold, template, create_CSV,
                  show_cross, annotate_figure, cmap, show_colorbar,
                  show_title, dpi, imageType):
    """Find clusters in *stats_file* and write one ortho-view figure per
    cluster (plus an optional CSV summary) into <stats_file>_CLUSTERS/.

    Parameters mirror the CLI options; `threshold` is the voxel-wise
    statistical threshold and `cluster_extent` the minimum cluster size.
    """
    # Read in the stats file
    img = nb.load(stats_file)
    data = img.get_data()
    # BUG FIX: in nibabel `affine` is a property, not a method
    affine = img.affine

    # Find the clusters.  Use the function parameters rather than the
    # module-level globals of the same meaning, so the signature is honest.
    labels, nlabels, data, binarized_data = get_labels(
        data,
        cluster_thr=threshold,
        min_extent=cluster_extent)

    # Catch if nlabels is 0, i.e. no clusters survived thresholding
    if nlabels == 0:
        print('No clusters survive the thresholds in {}'.format(stats_file))
        return

    # If there *are* clusters though, then get the cluster information
    # for each of them
    print('{} clusters were found in {}'.format(nlabels, stats_file))
    coords, clusterInfo = get_cluster_info(labels, affine, data)

    # Create output folder
    # BUG FIX: os.isdir / os.path.makedirs do not exist
    output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    # Create figures
    show_slices(data, affine,
                coords=coords,
                cmap=cmap,
                show_colorbar=show_colorbar,
                showCross=show_cross,
                cluster_thr=threshold,
                annotate=annotate_figure,
                template=template,
                dpiRes=dpi,
                suffix=imageType,
                show_title=show_title)

    # Create CSV output
    # BUG FIX: the original wrote to opj('figures', filename, ...) where
    # `filename` was never defined (NameError); write next to the figures.
    if create_CSV:
        header = 'X,Y,Z,Size,Max,Min,Mean,Std'
        np.savetxt(
            opj(output_folder, 'cluster_info.csv'), clusterInfo,
            delimiter=',', fmt='%.8f', header=header, comments='')

    # Print cluster info in terminal
    row_format = "{:>8}{:>8}{:>8}{:>10}{:>16}{:>16}{:>16}{:>16}"
    print(row_format.format(
        *['X', 'Y', 'Z', 'Size', 'Max', 'Min', 'Mean', 'Std']))
    for c in clusterInfo:
        print(row_format.format(*c))
    print('\n')
#=============================================================================
# Save the figure
#=============================================================================
# NOTE(review): this repeats the folder creation from the setup section above
# (kept for safety when sections are run independently), with the os API
# calls corrected: os.isdir / os.path.makedirs do not exist.
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
if not os.path.isdir(output_folder):
    os.makedirs(output_folder)
if __name__ == "__main__":
cluster_extend = int(sys.argv[1])
threshold = float(sys.argv[2])
template = str(sys.argv[3])
create_CSV = bool(sys.argv[4])
show_cross = bool(sys.argv[5])
annotate_figure = bool(sys.argv[6])
show_colorbar = bool(sys.argv[7])
colorbar_orientation = str(sys.argv[8])
show_title = bool(sys.argv[9])
dpi = int(sys.argv[10])
imageType = str(sys.argv[11])
prefix = str(sys.argv[12])
#=========================================================================
# SET SOME VARIABLES
#=========================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
stats_file = arguments.stats_file
cluster_extent = arguments.cluster_extent
cluster_thr = arguments.cluster_thr
store_csv = arguments.csv
cluster_title = arguments.cluster_title
cmap = arguments.cmap
show_colorbar = arguments.cbar
#thr_abs = arguments.thr_abs
#thr_pos = arguments.thr_pos
#black_bg = arguments.black_bg
#lower_thresh = arguments.lower
#upper_thresh = arguments.upper
dpi = arguments.dpi
image_format = arguments.format
#===============================================================================
# Get the colormap from nilearn
#===============================================================================
if hasattr(cm, cmap):
cmap = getattr(cm, cmap)
#===============================================================================
# Create the output folder
#===============================================================================
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
if not os.isdir(output_folder):
os.path.makedirs(output_folder)
#===============================================================================
# Create the figures and CSV output
#===============================================================================
create_output(stats_file, cluster_extent, threshold, template, create_CSV,
show_cross, annotate_figure, cmap, show_colorbar,
show_title, dpi, imageType)
| mit |
PalNilsson/pilot2 | pilot/info/storagedata.py | 1 | 6509 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Alexey Anisenkov, anisyonk@cern.ch, 2018
# - Paul Nilsson, paul.nilsson@cern.ch, 2019
"""
The implementation of data structure to host storage data description.
The main reasons for such incapsulation are to
- apply in one place all data validation actions (for attributes and values)
- introduce internal information schema (names of attribues) to remove direct dependency
with data structrure, formats, names from external sources (e.g. AGIS/CRIC)
:author: Alexey Anisenkov
:contact: anisyonk@cern.ch
:date: January 2018
"""
import traceback
from pilot.util import https
from pilot.util.config import config
from .basedata import BaseData
import logging
logger = logging.getLogger(__name__)
class StorageData(BaseData):
    """
    High-level object to host Storage details (available protocols, etc.)

    Attribute values are populated from an external (AGIS/CRIC-style)
    dictionary via :meth:`load`; `_keys` drives BaseData's validation and
    type casting of those attributes.
    """

    ## put explicit list of all the attributes with comments for better inline-documentation by sphinx
    ## FIX ME LATER: use proper doc format
    ## incomplete list of attributes .. to be extended once becomes used

    pk = 0      # unique identification number
    name = ""   # DDMEndpoint name
    type = ""   # type of Storage <- can this be renamed to storagetype without causing any problem with queuedata?
    token = ""  # space token descriptor
    is_deterministic = None  # whether the endpoint uses deterministic paths
    state = None             # endpoint state (e.g. ACTIVE) -- values come from the ext source
    site = None  # ATLAS Site name
    arprotocols = {}   # protocols by activity, presumably read protocols -- TODO confirm against ext schema
    rprotocols = {}    # protocols keyed by protocol id -- TODO confirm against ext schema
    special_setup = {}
    resource = None    # NOTE(review): initialized to None but declared dict in _keys below -- confirm intended type

    # specify the type of attributes for proper data validation and casting
    _keys = {int: ['pk'],
             str: ['name', 'state', 'site', 'type', 'token'],
             dict: ['copytools', 'acopytools', 'astorages', 'arprotocols', 'rprotocols', 'resource'],
             bool: ['is_deterministic']
             }
def __init__(self, data):
"""
:param data: input dictionary of storage description by DDMEndpoint name as key
"""
self.load(data)
# DEBUG
#import pprint
#logger.debug('initialize StorageData from raw:\n%s' % pprint.pformat(data))
#logger.debug('Final parsed StorageData content:\n%s' % self)
def load(self, data):
"""
Construct and initialize data from ext source
:param data: input dictionary of storage description by DDMEndpoint name as key
"""
# the translation map of the queue data attributes from external data to internal schema
# first defined ext field name will be used
# if key is not explicitly specified then ext name will be used as is
## fix me later to proper internal names if need
kmap = {
# 'internal_name': ('ext_name1', 'extname2_if_any')
# 'internal_name2': 'ext_name3'
'pk': 'id',
}
self._load_data(data, kmap)
## custom function pattern to apply extra validation to the key values
##def clean__keyname(self, raw, value):
## :param raw: raw value passed from ext source as input
## :param value: preliminary cleaned and casted to proper type value
##
## return value
# to be improved: move it to some data loader
def get_security_key(self, secret_key, access_key):
"""
Get security key pair from panda
:param secret_key: secrect key name as string
:param access_key: access key name as string
:return: setup as a string
"""
try:
data = {'privateKeyName': secret_key, 'publicKeyName': access_key}
logger.info("Getting key pair: %s" % data)
res = https.request('{pandaserver}/server/panda/getKeyPair'.format(pandaserver=config.Pilot.pandaserver),
data=data)
if res and res['StatusCode'] == 0:
return {"publicKey": res["publicKey"], "privateKey": res["privateKey"]}
else:
logger.info("Got key pair returns wrong value: %s" % res)
except Exception as ex:
logger.error("Failed to get key pair(%s,%s): %s, %s" % (access_key, secret_key, ex, traceback.format_exc()))
return {}
def get_special_setup(self, protocol_id=None):
"""
Construct special setup for ddms such as objectstore
:param protocol_id: protocol id.
:return: setup as a string
"""
logger.info("Get special setup for protocol id(%s)" % (protocol_id))
if protocol_id in self.special_setup and self.special_setup[protocol_id]:
return self.special_setup[protocol_id]
if protocol_id is None or str(protocol_id) not in list(self.rprotocols.keys()): # Python 2/3
return None
if self.type in ['OS_ES', 'OS_LOGS']:
self.special_setup[protocol_id] = None
settings = self.rprotocols.get(str(protocol_id), {}).get('settings', {})
access_key = settings.get('access_key', None)
secret_key = settings.get('secret_key', None)
is_secure = settings.get('is_secure', None)
# make sure all things are correctly defined in AGIS.
# If one of them is not defined correctly, will not setup this part. Then rucio client can try to use signed url.
# This part is preferred because signed url is not efficient.
if access_key and secret_key and is_secure:
key_pair = self.get_security_key(secret_key, access_key)
if "privateKey" not in key_pair or key_pair["privateKey"] is None:
logger.error("Failed to get the key pair for S3 objectstore from panda")
else:
setup = "export S3_ACCESS_KEY=%s; export S3_SECRET_KEY=%s; export S3_IS_SECURE=%s;" % (key_pair["publicKey"],
key_pair["privateKey"],
is_secure)
self.special_setup[protocol_id] = setup
logger.info("Return key pair with public key: %s" % key_pair["publicKey"])
return self.special_setup[protocol_id]
return None
| apache-2.0 |
davek44/Basset | src/dev/basset_conv2.py | 1 | 5911 | #!/usr/bin/env python
from optparse import OptionParser
import os
import random
import subprocess
import h5py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
################################################################################
# basset_conv2.py
#
# Visualize the 2nd convolution layer of a CNN.
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model_file> <test_hdf5_file>'
parser = OptionParser(usage)
parser.add_option('-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.')
parser.add_option('-o', dest='out_dir', default='.')
parser.add_option('-s', dest='sample', default=None, type='int', help='Sample sequences from the test set [Default:%default]')
(options,args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide Basset model file and test data in HDF5 format.')
else:
model_file = args[0]
test_hdf5_file = args[1]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#################################################################
# load data
#################################################################
# load sequences
test_hdf5_in = h5py.File(test_hdf5_file, 'r')
seq_vecs = np.array(test_hdf5_in['test_in'])
seq_targets = np.array(test_hdf5_in['test_out'])
test_hdf5_in.close()
#################################################################
# sample
#################################################################
if options.sample is not None:
# choose sampled indexes
sample_i = np.array(random.sample(xrange(seq_vecs.shape[0]), options.sample))
# filter
seq_vecs = seq_vecs[sample_i]
seq_targets = seq_targets[sample_i]
# create a new HDF5 file
sample_hdf5_file = '%s/sample.h5' % options.out_dir
sample_hdf5_out = h5py.File(sample_hdf5_file, 'w')
sample_hdf5_out.create_dataset('test_in', data=seq_vecs)
sample_hdf5_out.close()
# update test HDF5
test_hdf5_file = sample_hdf5_file
#################################################################
# Torch predict
#################################################################
if options.model_hdf5_file is None:
options.model_hdf5_file = '%s/model_out.h5' % options.out_dir
# TEMP
torch_cmd = './basset_conv2_predict.lua %s %s %s' % (model_file, test_hdf5_file, options.model_hdf5_file)
print torch_cmd
subprocess.call(torch_cmd, shell=True)
# load model output
model_hdf5_in = h5py.File(options.model_hdf5_file, 'r')
filter_weights = np.array(model_hdf5_in['weights'])
filter_outs = np.array(model_hdf5_in['outs'])
model_hdf5_in.close()
# store useful variables
num_filters = filter_weights.shape[0]
#################################################################
# individual filter plots
#################################################################
stats_out = open('%s/table_stats.txt'%options.out_dir, 'w')
weights_out = open('%s/table_weights.txt'%options.out_dir, 'w')
for f in range(num_filters):
print 'Filter %d' % f
# plot filter parameters as a heatmap
plot_filter_heat(filter_weights[f,:,:], '%s/filter%d_heat.pdf' % (options.out_dir,f))
# print filter parameters as table
for pos in range(filter_weights.shape[2]):
for c1 in range(filter_weights.shape[1]):
cols = (f, pos, c1, filter_weights[f,c1,pos])
print >> weights_out, '%-3d %2d %3d %6.3f' % cols
# plot density of filter output scores
fmean, fstd = plot_output_density(np.ravel(filter_outs[:,f,:]), '%s/filter%d_hist.pdf' % (options.out_dir,f))
row_cols = (f, fmean, fstd)
print >> stats_out, '%-3d %6.4f %6.4f' % row_cols
stats_out.close()
#################################################################
# all filter plots
#################################################################
filter_weights_mean = filter_weights.mean(axis=2)
sns.set(font_scale=0.25)
plt.figure()
g = sns.clustermap(filter_weights_mean, cmap='PRGn', figsize=(16,12))
for tick in g.ax_heatmap.get_xticklabels():
tick.set_rotation(-45)
tick.set_horizontalalignment('left')
plt.savefig('%s/f1f2_heat.pdf' % options.out_dir)
plt.close()
def plot_filter_heat(weight_matrix, out_pdf):
    '''Render a filter's parameter matrix as a heatmap and save it to PDF.

    Args
        weight_matrix: np.array of the filter's parameter matrix
        out_pdf: output PDF file path
    '''
    sns.set(font_scale=0.4)
    plt.figure(figsize=(2,12))

    # Symmetric color limits so that zero maps to the center of the palette.
    vlim = abs(weight_matrix).max()
    sns.heatmap(weight_matrix, cmap='PRGn', linewidths=0.05, vmin=-vlim, vmax=vlim)

    axes = plt.gca()
    axes.set_xticklabels(range(1, weight_matrix.shape[1]+1))

    plt.tight_layout()
    plt.savefig(out_pdf)
    plt.close()
def plot_output_density(f_outputs, out_pdf):
    '''Plot a histogram of a filter's output values and save it to PDF.

    Args
        f_outputs: np.array of the filter's outputs
        out_pdf: output PDF file path

    Returns
        (mean, std) of f_outputs
    '''
    # Summary statistics are computed up front and returned to the caller.
    out_mean = f_outputs.mean()
    out_std = f_outputs.std()

    sns.set(font_scale=1.3)
    plt.figure()
    sns.distplot(f_outputs, kde=False)
    plt.xlabel('ReLU output')
    plt.savefig(out_pdf)
    plt.close()

    return out_mean, out_std
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
    # Run the CLI entry point when executed as a script.
    main()
    #pdb.runcall(main)
gabrevaya/Canto5 | main.py | 2 | 2355 | '''
Función principal. Obtiene silabas separadas y caracterizadas desde un archivo wav.
'''
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from envolvente import envolvente
from read_wav import read_wav
from get_files_paths import get_files_paths
from find_syllables import find_syllables
from split_syllables import split_syllables
from spectrum import spectrum
#def main(bin_size, fbird):
#def main(directorio, bin_size, fbird):
directorio = '.'
bin_size = 1000
fbird = 1.8
[paths_to_files, file_names, bird_names]=get_files_paths(directorio, "wav", "bird_name.txt")
#audio_file="call1.wav"
for audio_file in paths_to_files:
[raw_audio, times, sample_rate, data_points]=read_wav(audio_file)
#Calculo e imprimo envolvente de máximos
[envelope, t_envelope]=envolvente(raw_audio, times, sample_rate, data_points, 100, 0, 0)
plt.plot(times, raw_audio)
plt.plot(t_envelope, envelope)
plt.xlabel('Time (s)')
plt.ylabel('Amplitud')
#Separación de sílabas: Nuevo cálculo de envolvente (media)
#bin_size=2500 #(depende)
[env, t_env]=envolvente(raw_audio, times, sample_rate, data_points, bin_size, 1, 0)
#Creación de señal lógica indicando ventanas donde están las sílabas
npoints_umbral=100
loc_silabas=find_syllables(raw_audio, times, env, t_env, npoints_umbral, fbird)
# Creada la señal lógica, con ella se corta el vector original
margen=50 #puntos
silabas=split_syllables(raw_audio, loc_silabas, margen)
time_windows=split_syllables(times, loc_silabas, margen)
nro_sil=len(silabas)
#i_silabas=[0:nro_sil:1]
# imprimo sílabas resaltándolas en igual posición en el grafico original
plt.plot(times, raw_audio)
for sil in range(nro_sil):
plt.plot(time_windows[sil], silabas[sil])
#aprovecho para corregir origen de vectores de tiempo resultantes del corte
time_windows[sil]=time_windows[sil]-np.min(time_windows[sil])
plt.show()
#imprimo sílabas en graficos diferentes
# fig= plt.subplots(nro_sil, 1, sharex='row')
#
# for sil in range(nro_sil):
# ax = plt.subplot(nro_sil,1,sil)
# ax.plot(time_windows[sil], silabas[sil])
#
# plt.xlabel('Time (s)')
# plt.ylabel('Amplitud')
# plt.show()
# #plt.savefig(directorio+'silabas.png')
# plt.close()
#
| gpl-3.0 |
jhonatanoliveira/pgmpy | pgmpy/estimators/ConstraintBasedEstimator.py | 5 | 24197 | #!/usr/bin/env python
from warnings import warn
from itertools import combinations
from pgmpy.base import UndirectedGraph
from pgmpy.models import BayesianModel
from pgmpy.estimators import StructureEstimator
from pgmpy.independencies import Independencies, IndependenceAssertion
class ConstraintBasedEstimator(StructureEstimator):
    """Constraint-based structure estimation of BayesianModels (PC algorithm).

    Identifies (conditional) independencies in the data set using a
    chi-square test and uses the PC algorithm to estimate a DAG pattern
    that satisfies the identified independencies. The DAG pattern can then
    be completed to a faithful BayesianModel, if possible.

    References
    ----------
    [1] Koller & Friedman, Probabilistic Graphical Models - Principles and
        Techniques, 2009, Section 18.2
    [2] Neapolitan, Learning Bayesian Networks, Section 10.1.2 (PC algorithm)
    """

    def __init__(self, data, **kwargs):
        """
        Parameters
        ----------
        data: pandas DataFrame object
            dataframe where each column represents one variable. (If some
            values in the data are missing the data cells should be set to
            `numpy.NaN`; note that pandas converts each column containing
            `numpy.NaN`s to dtype `float`.)
        state_names: dict (optional)
            A dict indicating, for each variable, the discrete set of states
            (or values) that the variable can take. If unspecified, the
            observed values in the data set are taken to be the only
            possible states.
        complete_samples_only: bool (optional, default `True`)
            Specifies how to deal with missing data, if present. If `True`,
            all rows that contain `np.NaN` somewhere are ignored. If
            `False`, for each variable every row where neither the variable
            nor its parents are `np.NaN` is used (affects `state_count`).
        """
        super(ConstraintBasedEstimator, self).__init__(data, **kwargs)

    def estimate(self, significance_level=0.01):
        """Estimate a BayesianModel for the data set using the PC algorithm.

        Independencies are identified from the data using a chi-squared
        statistic with acceptance threshold `significance_level`. PC
        identifies a partially directed acyclic graph (PDAG), given that
        the tested independencies admit a faithful Bayesian network
        representation; a completion of that PDAG is returned.

        Parameters
        ----------
        significance_level: float, default: 0.01
            Desired Type 1 error probability of falsely rejecting the null
            hypothesis that variables are independent, given that they are.
            The lower the value, the less likely we are to accept
            dependencies, resulting in a sparser graph.

        Returns
        -------
        model: BayesianModel()-instance
            An estimate for the BayesianModel of the data set (not yet
            parametrized).
        """
        skel, separating_sets = self.estimate_skeleton(significance_level)
        pdag = self.skeleton_to_pdag(skel, separating_sets)
        return self.pdag_to_dag(pdag)

    def estimate_skeleton(self, significance_level=0.01):
        """Estimate a graph skeleton (UndirectedGraph) for the data set.

        Uses the `build_skeleton` method (PC algorithm); independencies are
        determined using a chi-square statistic with acceptance threshold
        `significance_level`.

        Parameters
        ----------
        significance_level: float, default: 0.01
            See `estimate`.

        Returns
        -------
        skeleton: UndirectedGraph
            An estimate for the undirected skeleton of the BN underlying
            the data.
        separating_sets: dict
            For each pair of not directly connected nodes, a separating set
            of variables that makes them conditionally independent (needed
            for edge orientation procedures).
        """
        nodes = self.state_names.keys()

        def is_independent(X, Y, Zs):
            # Accept the null hypothesis X _|_ Y | Zs when the p-value of
            # the chi-square test is not below the significance level.
            chi2, p_value, sufficient_data = self.test_conditional_independence(X, Y, Zs)
            return p_value >= significance_level

        return self.build_skeleton(nodes, is_independent)

    @staticmethod
    def estimate_from_independencies(nodes, independencies):
        """Estimate a BayesianModel from an Independencies()-object or a
        decision function for conditional independencies.

        Requires that the set of independencies admits a faithful
        representation (e.g. is the set of d-separations of some BN, or is
        closed under the semi-graphoid axioms). See `build_skeleton`,
        `skeleton_to_pdag`, `pdag_to_dag` for details.

        Parameters
        ----------
        nodes: list, array-like
            Node/variable names of the network.
        independencies: Independencies-instance or function
            Either an Independencies() instance, or a function
            `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs, otherwise
            `False` (X, Y individual nodes, Zs a list of nodes).

        Returns
        -------
        model: BayesianModel instance
        """
        skel, separating_sets = ConstraintBasedEstimator.build_skeleton(nodes, independencies)
        pdag = ConstraintBasedEstimator.skeleton_to_pdag(skel, separating_sets)
        return ConstraintBasedEstimator.pdag_to_dag(pdag)

    @staticmethod
    def pdag_to_dag(pdag):
        """Complete a PDAG to a DAG, without adding v-structures, if such a
        completion exists.

        If no faithful extension is possible, some fully oriented DAG that
        corresponds to the PDAG is returned and a warning is generated.
        This is a static method.

        Parameters
        ----------
        pdag: DirectedGraph
            A DAG pattern consisting of (acyclic) directed edges as well as
            "undirected" edges, represented as both-way edges between nodes.

        Returns
        -------
        dag: BayesianModel
            A faithful orientation of pdag if one exists; otherwise any
            fully orientated DAG/BayesianModel with the structure of pdag.

        References
        ----------
        [1] Chickering, Learning Equivalence Classes of Bayesian-Network
            Structures, 2002 (page 454, last paragraph)
        [2] Dor & Tarsi, A simple algorithm to construct a consistent
            extension of a partially oriented graph, 1992
        """
        pdag = pdag.copy()
        dag = BayesianModel()
        dag.add_nodes_from(pdag.nodes())

        # Edges that are already strictly directed in the PDAG are kept.
        for X, Y in pdag.edges():
            if not pdag.has_edge(Y, X):
                dag.add_edge(X, Y)

        while pdag.number_of_nodes() > 0:
            # Find a node X with (1) no strictly-directed outgoing edges and
            # (2) an empty set of undirected neighbors, or undirected
            # neighbors + parents of X forming a clique.
            found = False
            for X in pdag.nodes():
                directed_outgoing_edges = set(pdag.successors(X)) - set(pdag.predecessors(X))
                undirected_neighbors = set(pdag.successors(X)) & set(pdag.predecessors(X))
                neighbors_are_clique = all((pdag.has_edge(Y, Z)
                                            for Z in pdag.predecessors(X)
                                            for Y in undirected_neighbors if not Y == Z))

                if not directed_outgoing_edges and \
                        (not undirected_neighbors or neighbors_are_clique):
                    found = True
                    # Orient all of X's remaining edges towards X, then
                    # remove X from the PDAG.
                    for Y in pdag.predecessors(X):
                        dag.add_edge(Y, X)
                    pdag.remove_node(X)
                    break

            if not found:
                warn("PDAG has no faithful extension (= no oriented DAG with the " +
                     "same v-structures as PDAG). Remaining undirected PDAG edges " +
                     "oriented arbitrarily.")
                for X, Y in pdag.edges():
                    if not dag.has_edge(Y, X):
                        try:
                            dag.add_edge(X, Y)
                        except ValueError:
                            # Skip orientations that would create a cycle.
                            pass
                break

        return dag

    @staticmethod
    def model_to_pdag(model):
        """Construct the DAG pattern (representing the I-equivalence class)
        for a given BayesianModel; the "inverse" of pdag_to_dag.
        """
        if not isinstance(model, BayesianModel):
            raise TypeError("model: Expected BayesianModel instance, " +
                            "got type {model_type}".format(model_type=type(model)))

        skel, separating_sets = ConstraintBasedEstimator.build_skeleton(
            model.nodes(),
            model.get_independencies())
        return ConstraintBasedEstimator.skeleton_to_pdag(skel, separating_sets)

    @staticmethod
    def skeleton_to_pdag(skel, separating_sets):
        """Orient the edges of a graph skeleton based on information from
        `separating_sets` to form a DAG pattern (DirectedGraph).

        Parameters
        ----------
        skel: UndirectedGraph
            An undirected graph skeleton as e.g. produced by
            `estimate_skeleton`.
        separating_sets: dict
            For each pair of not directly connected nodes, a separating set
            of variables that makes them conditionally independent.

        Returns
        -------
        pdag: DirectedGraph
            An estimate for the DAG pattern of the BN underlying the data.
            The graph may contain nodes joined by both-way edges (X->Y and
            Y->X); any completion removing one direction of each such pair
            yields an I-equivalent Bayesian network DAG.

        Reference
        ---------
        Neapolitan, Learning Bayesian Networks, Section 10.1.2,
        Algorithm 10.2 (page 550); Koller & Friedman PGM, Algorithm 3.4.
        """
        pdag = skel.to_directed()
        node_pairs = list(combinations(pdag.nodes(), 2))

        def _still_undirected(U, V):
            # An edge counts as undirected while both directions remain.
            return pdag.has_edge(U, V) and pdag.has_edge(V, U)

        def _has_directed_path(source, target):
            # Depth-first search that follows strictly-directed edges only,
            # so undirected (both-way) edges cannot serve as path segments.
            # (Replaces the former use of `nx.all_simple_paths`: `nx` is not
            # imported in this module, and node paths were unpacked as edge
            # pairs, which would raise.)
            visited = set()
            stack = [source]
            while stack:
                current = stack.pop()
                if current == target:
                    return True
                if current in visited:
                    continue
                visited.add(current)
                stack.extend(successor for successor in pdag.successors(current)
                             if not pdag.has_edge(successor, current))
            return False

        # 1) for each X-Z-Y with X, Y non-adjacent: if Z is not in the
        #    separating set of (X, Y), orient as the collider X->Z<-Y
        #    (Algorithm 3.4 in Koller & Friedman PGM, page 86).
        for X, Y in node_pairs:
            if not skel.has_edge(X, Y):
                for Z in set(skel.neighbors(X)) & set(skel.neighbors(Y)):
                    if Z not in separating_sets[frozenset((X, Y))]:
                        pdag.remove_edges_from([(Z, X), (Z, Y)])

        progress = True
        while progress:  # apply orientation rules until no more edges orient
            num_edges = pdag.number_of_edges()

            # 2) for each X->Z-Y, orient the undirected edge to Z->Y.
            #    (BUG FIX: was `pdag.remove(...)`, a method DirectedGraph
            #    does not provide; use `remove_edge` with existence guard.)
            for X, Y in node_pairs:
                for Z in ((set(pdag.successors(X)) - set(pdag.predecessors(X))) &
                          (set(pdag.successors(Y)) & set(pdag.predecessors(Y)))):
                    if pdag.has_edge(Y, Z):
                        pdag.remove_edge(Y, Z)

            # 3) for each undirected X-Y with a strictly-directed path
            #    between its endpoints, orient the edge along that path
            #    (prevents the completion from creating a directed cycle).
            for X, Y in node_pairs:
                if _still_undirected(X, Y):
                    if _has_directed_path(X, Y):
                        pdag.remove_edge(Y, X)
                    elif _has_directed_path(Y, X):
                        pdag.remove_edge(X, Y)

            # 4) for each X-Z-Y with X->W, Y->W and Z-W, orient Z->W.
            for X, Y in node_pairs:
                for Z in (set(pdag.successors(X)) & set(pdag.predecessors(X)) &
                          set(pdag.successors(Y)) & set(pdag.predecessors(Y))):
                    for W in ((set(pdag.successors(X)) - set(pdag.predecessors(X))) &
                              (set(pdag.successors(Y)) - set(pdag.predecessors(Y))) &
                              (set(pdag.successors(Z)) & set(pdag.predecessors(Z)))):
                        if pdag.has_edge(W, Z):
                            pdag.remove_edge(W, Z)

            progress = num_edges > pdag.number_of_edges()

        return pdag

    @staticmethod
    def build_skeleton(nodes, independencies):
        """Estimate a graph skeleton (UndirectedGraph) from a set of
        independencies using (the first part of) the PC algorithm.

        The independencies can either be provided as an instance of the
        `Independencies` class or by passing a decision function. Returns a
        tuple `(skeleton, separating_sets)`. If an Independencies instance
        is passed, the contained assertions have to admit a faithful BN
        representation (e.g. be the d-separations of some Bayesian network,
        or closed under the semi-graphoid axioms); otherwise the procedure
        may fail to identify the correct structure.

        Parameters
        ----------
        nodes: list, array-like
            Node/variable names of the network skeleton.
        independencies: Independencies-instance or function
            Either an Independencies() instance, or a function
            `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs, otherwise
            `False`.

        Returns
        -------
        skeleton: UndirectedGraph
            An estimate for the undirected skeleton of the BN underlying
            the data.
        separating_sets: dict
            For each pair of not directly connected nodes, the separating
            ("witnessing") set that makes them conditionally independent.

        Reference
        ---------
        Neapolitan, Learning Bayesian Networks, Algorithm 10.2 (page 550);
        Koller & Friedman PGM, Section 3.4.2.1 (page 85), Algorithm 3.3.
        """
        nodes = list(nodes)

        if isinstance(independencies, Independencies):
            def is_independent(X, Y, Zs):
                return IndependenceAssertion(X, Y, Zs) in independencies
        elif callable(independencies):
            is_independent = independencies
        else:
            raise ValueError("'independencies' must be either Independencies-instance " +
                             "or a ternary function that decides independencies.")

        graph = UndirectedGraph(combinations(nodes, 2))
        lim_neighbors = 0
        separating_sets = dict()
        # Grow the candidate separating-set size until no node has that
        # many neighbors left.
        while not all([len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]):
            for node in nodes:
                # Materialize the neighbor list: edges are removed while
                # iterating (neighbors() may be a live view/iterator).
                for neighbor in list(graph.neighbors(node)):
                    if not graph.has_edge(node, neighbor):
                        continue  # defensive: edge removed earlier this pass
                    # Search for a separating set of size `lim_neighbors`
                    # among the remaining neighbors of `node`.
                    for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]),
                                                       lim_neighbors):
                        if is_independent(node, neighbor, separating_set):
                            separating_sets[frozenset((node, neighbor))] = separating_set
                            graph.remove_edge(node, neighbor)
                            break
            lim_neighbors += 1

        return graph, separating_sets
| mit |
hlin117/statsmodels | statsmodels/sandbox/nonparametric/kdecovclass.py | 33 | 5703 | '''subclassing kde
Author: josef pktd
'''
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
'''
from Anne Archibald in mailinglist:
http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
'''
def __init__(self, dataset, covariance):
self.covariance = covariance
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
    '''Gaussian KDE with a selectable bandwidth (covariance) factor.

    `covfact` may be 'scotts'/'sc', 'silverman'/'si', or a number that is
    used directly as the bandwidth factor.
    '''
    def __init__(self, dataset, covfact = 'scotts'):
        self.covfact = covfact
        scipy.stats.gaussian_kde.__init__(self, dataset)

    def _compute_covariance_(self):
        '''not used'''
        self.inv_cov = np.linalg.inv(self.covariance)
        # BUG FIX: `sqrt` was unqualified (NameError if this dead method were
        # ever called); use np.sqrt.
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n

    def covariance_factor(self):
        '''Return the bandwidth factor selected by self.covfact.

        Called by the scipy base class when (re)computing the covariance.
        '''
        if self.covfact in ['sc', 'scotts']:
            return self.scotts_factor()
        if self.covfact in ['si', 'silverman']:
            return self.silverman_factor()
        elif self.covfact:
            # any truthy non-keyword value is interpreted as a number
            return float(self.covfact)
        else:
            raise ValueError('covariance factor has to be scotts, silverman or a number')

    def reset_covfact(self, covfact):
        '''Change the bandwidth factor and recompute the KDE covariance.'''
        self.covfact = covfact
        self.covariance_factor()
        self._compute_covariance()
def plotkde(covfact):
    '''Re-fit the module-level KDE with a new covariance factor and plot it
    against the sample histogram and the data-generating density.

    NOTE: relies on module-level globals (`gkde`, `ind`, `xn`, `alpha`,
    `mlow`, `mhigh`) defined in the __main__ section -- not usable standalone.
    '''
    gkde.reset_covfact(covfact)
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histgram of sample
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density (two-component normal mixture)
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
             (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
    plt.legend()
from numpy.testing import assert_array_almost_equal, \
assert_almost_equal, assert_
def test_kde_1d():
    """Regression test: a gaussian_kde fitted to N(0,1) samples should be
    close to the matching normal pdf, and its integrals should respect the
    symmetry of the distribution around the sample mean."""
    np.random.seed(8765678)
    n_basesample = 500
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)
    print(xnmean, xnstd)

    # get kde for original sample
    gkde = stats.gaussian_kde(xn)

    # evaluate the density funtion for the kde for some points
    xs = np.linspace(-7,7,501)
    kdepdf = gkde.evaluate(xs)
    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
    print('MSE', np.sum((kdepdf - normpdf)**2))
    print('axabserror', np.max(np.abs(kdepdf - normpdf)))
    intervall = xs[1] - xs[0]
    # integrated squared error between KDE and true density must be small
    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
    #assert_array_almost_equal(kdepdf, normpdf, decimal=2)
    print(gkde.integrate_gaussian(0.0, 1.0))
    print(gkde.integrate_box_1d(-np.inf, 0.0))
    print(gkde.integrate_box_1d(0.0, np.inf))
    print(gkde.integrate_box_1d(-np.inf, xnmean))
    print(gkde.integrate_box_1d(xnmean, np.inf))
    # probability mass on each side of the sample mean is ~0.5
    assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
    # analytic integrals should agree with their Riemann-sum approximations
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*intervall, decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
                        (kdepdf*normpdf).sum()*intervall, decimal=2)
##    assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
##                        (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
    # Demo: fit KDEs with several bandwidth choices to a two-component
    # normal mixture and compare them against the data-generating density.

    # generate a sample
    n_basesample = 1000
    np.random.seed(8765678)
    alpha = 0.6  # weight for (prob of) lower distribution
    mlow, mhigh = (-3, 3)  # mean locations for gaussian mixture
    # BUG FIX: np.random.randn requires integer shape arguments; the
    # original passed floats (alpha * n_basesample), which is a TypeError
    # on modern numpy. Truncation matches the historical behavior.
    xn = np.concatenate([mlow + np.random.randn(int(alpha * n_basesample)),
                         mhigh + np.random.randn(int((1 - alpha) * n_basesample))])

    # get kde for original sample
    #gkde = stats.gaussian_kde(xn)
    gkde = gaussian_kde_covfact(xn, 0.1)
    # evaluate the density funtion for the kde for some points
    ind = np.linspace(-7, 7, 101)
    kdepdf = gkde.evaluate(ind)

    plt.figure()
    # plot histgram of sample
    # NOTE(review): 'normed' was removed in matplotlib 3.x; use
    # density=True on current matplotlib -- confirm target version.
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()

    # same comparison with Scott's rule instead of a fixed factor
    gkde = gaussian_kde_covfact(xn, 'scotts')
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histgram of sample
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()
    #plt.show()

    # sweep over several covariance factors, one figure each
    for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
        plotkde(cv)

    test_kde_1d()

    # plain scipy KDE on a fresh N(0,1) sample for reference
    np.random.seed(8765678)
    n_basesample = 1000
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)

    # get kde for original sample
    gkde = stats.gaussian_kde(xn)
| bsd-3-clause |
markomanninen/strongs | isopsephy/search.py | 5 | 1537 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: search.py
def find_cumulative_indices(list_of_numbers, search_sum):
    """
    Find the index ranges of every contiguous run in *list_of_numbers*
    whose values sum exactly to *search_sum*, using a sliding window.

    find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
    [[4, 5],[5, 6]]

    If no run matches, an empty string is returned (kept deliberately for
    easier cell data handling on a pandas dataframe).
    """
    u = 0  # left edge of the sliding window
    y = 0  # running sum of the window list_of_numbers[u:idx+1]
    result = []
    for idx, val in enumerate(list_of_numbers):
        y += list_of_numbers[idx]
        # shrink the window from the left while it is not below the target
        while y >= search_sum:
            if y == search_sum:
                # BUG FIX: materialize the range so a list of indices is
                # returned; on Python 3 the original leaked bare `range`
                # objects, contradicting the documented return value.
                result.append(list(range(u, idx + 1)))
            y -= list_of_numbers[u]
            u += 1
    # if matches are not found, empty string is returned
    # for easier cell data handling on pandas dataframe
    return result or ''
# http://stackoverflow.com/questions/21380268/matching-the-sum-of-values-on-string
def search_by_num(text, num):
    """Return the contiguous substrings of *text* (a whitespace-separated
    string of integers) whose values sum to *num*, joined into one
    space-separated string."""
    numbers = string2list(text)
    matches = find_number(numbers, num)
    return list2string(matches)
def list2string(alist):
    """Join the items of *alist* into a single space-separated string."""
    return " ".join(str(item) for item in alist)
def string2list(slist):
    """Split a whitespace-separated string of integers into a list of ints."""
    return [int(token) for token in slist.split()]
def find_number(alist, total):
    """Sliding-window search: return every contiguous run of *alist* whose
    sum equals *total*, each run rendered as a space-separated string."""
    matches = []
    left = 0
    running = 0  # sum of the current window alist[left:right+1]
    for right, value in enumerate(alist):
        # extend the window to the right
        running += value
        # shrink from the left while the window sum is not below the target
        while running >= total:
            if running == total:
                matches.append(list2string(alist[left:right + 1]))
            running -= alist[left]
            left += 1
    return matches
jstoxrocky/statsmodels | statsmodels/datasets/randhie/data.py | 25 | 2667 | """RAND Health Insurance Experiment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is in the public domain."""
TITLE = __doc__
SOURCE = """
The data was collected by the RAND corporation as part of the Health
Insurance Experiment (HIE).
http://www.rand.org/health/projects/hie.html
This data was used in::
Cameron, A.C. amd Trivedi, P.K. 2005. `Microeconometrics: Methods
and Applications,` Cambridge: New York.
And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html>
See randhie/src for the original data and description. The data included
here contains only a subset of the original data. The data varies slightly
compared to that reported in Cameron and Trivedi.
"""
DESCRSHORT = """The RAND Co. Health Insurance Experiment Data"""
DESCRLONG = """"""
NOTE = """::
Number of observations - 20,190
Number of variables - 10
Variable name definitions::
mdvis - Number of outpatient visits to an MD
lncoins - ln(coinsurance + 1), 0 <= coninsurance <= 100
idp - 1 if individual deductible plan, 0 otherwise
lpi - ln(max(1, annual participation incentive payment))
fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise
physlm - 1 if the person has a physical limitation
disea - number of chronic diseases
hlthg - 1 if self-rated health is good
hlthf - 1 if self-rated health is fair
hlthp - 1 if self-rated health is poor
(Omitted category is excellent self-rated health)
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv')
def load():
    """
    Load the RAND HIE data and return a Dataset class.

    Returns
    -------
    Dataset
        A dataset object exposing array attributes ``endog`` (the
        response variable, mdvis) and ``exog`` (the design matrix).
    """
    data = _get_data()
    return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
    """
    Load the RAND HIE data and return a Dataset class (pandas-backed).

    Returns
    -------
    Dataset
        A dataset object exposing attributes ``endog`` (the response
        variable, mdvis) and ``exog`` (the design matrix) as pandas
        objects.
    """
    from pandas import read_csv
    data = read_csv(PATH)
    return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
    """Read the bundled randhie.csv into a numpy record array.

    Returns
    -------
    numpy.recarray
        All columns parsed as float, with field names from the header row.
    """
    # Use a context manager so the file handle is closed (the original
    # leaked the handle opened inline and kept an unused `filepath` local).
    with open(PATH, "rb") as datafile:
        data = recfromtxt(datafile, delimiter=",", names=True, dtype=float)
    return data
| bsd-3-clause |
openpathsampling/openpathsampling | openpathsampling/tests/test_histogram.py | 2 | 11957 | from __future__ import division
from __future__ import absolute_import
from past.utils import old_div
from builtins import object
from .test_helpers import assert_items_almost_equal, assert_items_equal
import pytest
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
import collections
from openpathsampling.numerics import (Histogram, SparseHistogram,
HistogramPlotter2D,
histograms_to_pandas_dataframe)
class MockAxes(object):
    """Minimal stand-in for a matplotlib ``Axes``: it records the tick
    positions given at construction and echoes them back through the
    ``get_xticks``/``get_yticks`` accessors used by the plotter."""

    def __init__(self, xticks, yticks):
        self.xticks = xticks
        self.yticks = yticks

    def get_xticks(self):
        return self.xticks

    def get_yticks(self):
        return self.yticks
class TestFunctions(object):
    """Tests for module-level helper functions of openpathsampling.numerics."""
    def test_histograms_to_pandas_dataframe(self):
        # Columns must be ordered numerically, not lexicographically.
        data = [1.0, 1.1, 1.2, 1.3, 2.0, 1.4, 2.3, 2.5, 3.1, 3.5]
        # This length needs to be larger than 10 to see a difference between
        # str ordering and int ordering
        hists = [Histogram(n_bins=5) for i in range(11)]
        for hist in hists:
            _ = hist.histogram(data)
        df = histograms_to_pandas_dataframe(hists)
        # sort like is done in analysis
        df = df.sort_index(axis=1)
        # This breaks if the sorting is done based on strings as that will
        # return [0, 1, 10 ...] instead of [0, 1, 2, ...]
        for i, c in enumerate(df.columns):
            assert str(c) == str(i)
class TestHistogram(object):
    """Tests for the 1-D ``Histogram`` class: construction parameters,
    binning, normalization, and (reverse) cumulative distributions."""
    def setup(self):
        # 10 sample points; with bin width 0.5 over (1.0, 3.5) they
        # produce the Counter stored in ``self.hist`` below
        self.data = [1.0, 1.1, 1.2, 1.3, 2.0, 1.4, 2.3, 2.5, 3.1, 3.5]
        self.nbins = 5
        self.bins = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5]
        self.left_bin_edges = (1.0,)
        self.bin_widths = (0.5,)
        self.hist = collections.Counter({(0,): 5, (2,): 2, (3,): 1,
                                         (4,): 1, (5,): 1})

        # histograms constructed with each supported parameter combination
        self.default_hist = Histogram()
        self.hist_nbins = Histogram(n_bins=5)
        self.hist_nbins_binwidth = Histogram(n_bins=5, bin_width=1.0)
        self.hist_nbins_range = Histogram(n_bins=5, bin_range=(1.0, 3.5))
        self.hist_binwidth_range = Histogram(bin_width=0.5,
                                             bin_range=(1.0, 3.5))

    def test_initialization(self):
        assert self.default_hist.bins == 20
        assert self.hist_nbins.bins == 5
        assert self.hist_nbins.bin_width is None
        # bin_width without a bin_range is ignored
        assert self.hist_nbins_binwidth.bins == 5
        assert self.hist_nbins_binwidth.bin_width is None
        assert self.hist_nbins_range.bins == self.bins
        assert self.hist_binwidth_range.bins == self.bins

    def test_build_from_data(self):
        hist = self.hist_nbins.histogram(self.data)
        assert self.hist_nbins.count == 10
        assert hist == self.hist
        hist2 = self.hist_binwidth_range.histogram(self.data)
        assert self.hist_binwidth_range.count == 10
        assert hist2 == self.hist

    def test_build_from_data_fail(self):
        histo = Histogram(n_bins=5)
        with pytest.raises(RuntimeError, match="called without data"):
            histo.histogram()

    def test_add_data_to_histogram(self):
        histogram = Histogram(n_bins=5, bin_range=(1.0, 3.5))
        hist = histogram.add_data_to_histogram(self.data)
        assert histogram.count == 10
        assert hist == self.hist
        # adding the same data again doubles every count
        hist2 = histogram.add_data_to_histogram(self.data)
        assert hist2 == hist+hist
        assert histogram.count == 20

    def test_compare_parameters(self):
        assert self.hist_nbins.compare_parameters(None) is False
        assert (
            self.hist_nbins_range.compare_parameters(self.hist_binwidth_range)
            is True
        )
        assert (
            self.hist_binwidth_range.compare_parameters(self.hist_nbins_range)
            is True
        )
        histo = Histogram(n_bins=5)
        assert self.hist_nbins_range.compare_parameters(histo) is False
        histo.histogram(self.data)
        assert self.hist_nbins_range.compare_parameters(histo) is False
        assert (
            self.hist_nbins_range.compare_parameters(self.hist_nbins)
            is False
        )
        assert histo.compare_parameters(self.hist_nbins) is False
        assert self.hist_nbins.compare_parameters(histo) is False

    def test_compare_parameters_empty(self):
        # regression test; this was preventing histograms from being added
        hist = self.hist_binwidth_range
        assert hist.compare_parameters(hist.empty_copy())

    def test_xvals(self):
        histo = Histogram(n_bins=5)
        _ = histo.histogram(self.data) # need this to set the bins
        assert histo.left_bin_edges == self.left_bin_edges
        assert histo.bin_widths == self.bin_widths
        # "l"/"r"/"m" select left edge, right edge, or bin midpoint
        assert all(histo.xvals("l") == [1.0, 1.5, 2.0, 2.5, 3.0, 3.5])
        assert all(histo.xvals("r") == [1.5, 2.0, 2.5, 3.0, 3.5, 4.0])
        assert all(histo.xvals("m") == [1.25, 1.75, 2.25, 2.75, 3.25, 3.75])

    def test_normalization(self):
        histo = Histogram(n_bins=5)
        _ = histo.histogram(self.data)
        assert histo._normalization() == 5.0

    def test_normalized(self):
        histo = Histogram(n_bins=5)
        _ = histo.histogram(self.data)
        assert (list(histo.normalized().values()) ==
                [1.0, 0.0, 0.4, 0.2, 0.2, 0.2])
        # raw_probability=True normalizes by count instead of bin area
        assert (list(histo.normalized(raw_probability=True).values()) ==
                [0.5, 0.0, 0.2, 0.1, 0.1, 0.1])

    def test_cumulative(self):
        histo = Histogram(n_bins=5)
        _ = histo.histogram(self.data)
        cumulative = list(histo.cumulative(None).values())
        assert_items_almost_equal(cumulative, [5.0, 5.0, 7.0, 8.0, 9.0, 10.0])
        assert_items_almost_equal(histo.cumulative(maximum=1.0),
                                  [0.5, 0.5, 0.7, 0.8, 0.9, 1.0])

    def test_cumulative_all_zero_warn(self):
        # an all-zero histogram should warn and yield an all-zero CDF
        histo = Histogram(bin_width=0.5, bin_range=(1.0, 3.5))
        histo._histogram = collections.Counter({(0,): 0, (1,): 0})
        with pytest.warns(UserWarning, match=r"No non-zero"):
            cumul = histo.cumulative()
        assert cumul(2.13) == 0
        for val in cumul.values():
            assert val == 0

    def test_reverse_cumulative(self):
        histo = Histogram(n_bins=5)
        histo.histogram(self.data)
        rev_cumulative = histo.reverse_cumulative(maximum=None)
        assert_items_almost_equal(list(rev_cumulative.values()),
                                  [10, 5, 5, 3, 2, 1])
        rev_cumulative = histo.reverse_cumulative(maximum=1.0)
        assert_items_almost_equal(list(rev_cumulative.values()),
                                  [1.0, 0.5, 0.5, 0.3, 0.2, 0.1])

    def test_reverse_cumulative_all_zero_warn(self):
        histo = Histogram(bin_width=0.5, bin_range=(1.0, 3.5))
        histo._histogram = collections.Counter({(0,): 0, (1,): 0})
        with pytest.warns(UserWarning, match=r"No non-zero"):
            rcumul = histo.reverse_cumulative()
        assert rcumul(3.12) == 0
        for val in rcumul.values():
            assert val == 0

    def test_left_bin_error(self):
        # a single data point at the far right edge must still be counted
        histo = Histogram(bin_width=0.5, bin_range=(-1.0, 3.5))
        histo.histogram([3.5])
        assert histo.reverse_cumulative() != 0
class TestSparseHistogram(object):
    """Tests for the N-dimensional, Counter-backed ``SparseHistogram``."""
    def setup(self):
        # four 2-D points binned with widths (0.5, 0.3) from edges (0.0, -0.1)
        data = [(0.0, 0.1), (0.2, 0.7), (0.3, 0.6), (0.6, 0.9)]
        self.histo = SparseHistogram(bin_widths=(0.5, 0.3),
                                     left_bin_edges=(0.0, -0.1))
        self.histo.histogram(data)

    def test_correct(self):
        correct_results = collections.Counter({
            (0, 0): 1,
            (0, 2): 2,
            (1, 3): 1
        })
        assert self.histo._histogram == correct_results

    def test_call(self):
        histo_fcn = self.histo()
        # voxels we have filled
        assert histo_fcn((0.25, 0.65)) == 2
        assert histo_fcn((0.01, 0.09)) == 1
        assert histo_fcn((0.61, 0.89)) == 1
        # empty voxel gives 0
        assert histo_fcn((2.00, 2.00)) == 0

    def test_normalized(self):
        # raw probability: counts / total count (4 points)
        raw_prob_normed = self.histo.normalized(raw_probability=True)
        assert pytest.approx(raw_prob_normed((0.25, 0.65))) == 0.5
        assert pytest.approx(raw_prob_normed((0.01, 0.09))) == 0.25
        assert pytest.approx(raw_prob_normed((0.61, 0.89))) == 0.25
        # density: additionally divided by the voxel volume 0.5*0.3
        normed_fcn = self.histo.normalized()
        assert pytest.approx(normed_fcn((0.25, 0.65))) == old_div(0.5, 0.15)
        assert pytest.approx(normed_fcn((0.01, 0.09))) == old_div(0.25, 0.15)
        assert pytest.approx(normed_fcn((0.61, 0.89))) == old_div(0.25, 0.15)

    def test_mangled_input(self):
        # Sometimes singleton cvs are not unpacked properly
        data = ([0.0], [0.1])
        # This line might return mangled output
        out = self.histo.map_to_bins(data)
        # This raises on modern numpy if this is not 1D
        _ = max(out)
class TestHistogramPlotter2D(object):
    """Tests for axis/tick preparation helpers of ``HistogramPlotter2D``."""
    def setup(self):
        data = [(0.0, 0.1), (0.2, 0.7), (0.3, 0.6), (0.6, 0.9)]
        histo = SparseHistogram(bin_widths=(0.5, 0.3),
                                left_bin_edges=(0.0, -0.1))
        histo.histogram(data)
        self.plotter = HistogramPlotter2D(histo)

    def test_to_bins(self):
        # convert coordinate values to (fractional) bin numbers per dof
        vals = [-0.1, 0.5, 0.8]
        xbins = self.plotter.to_bins(vals, 0)
        assert_items_almost_equal(xbins, [-0.2, 1.0, 1.6])
        ybins = self.plotter.to_bins(vals, 1)
        truth = [0.0, 2.0, 3.0]
        for y, t in zip(ybins, truth):
            assert pytest.approx(y) == t
        # None passes through unchanged
        assert self.plotter.to_bins(None, 0) is None

    def test_axis_input(self):
        # no ticklabels, no lims: range comes straight from the hist values
        xt, xr, xl = self.plotter.axis_input(hist=[-1.0, 1.0, 2.0],
                                             ticklabels=None,
                                             lims=None,
                                             dof=0)
        assert xt is None
        assert_items_equal(xr, (-1.0, 2.0))
        assert_items_equal(xl, (0, 3))
        # ticklabels widen the range to include the tick positions
        xt, xr, xl = self.plotter.axis_input(hist=[-1.0, 1.0, 2.0],
                                             ticklabels=[-1.0, 0.0, 1.0],
                                             lims=None,
                                             dof=0)
        assert_items_equal(xt, [-2.0, 0.0, 2.0])
        assert_items_equal(xr, (-2.0, 2.0))
        assert_items_equal(xl, (0, 4.0))
        # explicit lims widen the range further
        xt, xr, xl = self.plotter.axis_input(hist=[-1.0, 1.0, 2.0],
                                             ticklabels=[-1.0, 0.0, 1.0],
                                             lims=(-2.5, 0.0),
                                             dof=0)
        assert_items_equal(xt, [-2.0, 0.0, 2.0])
        assert_items_equal(xr, (-5.0, 2.0))
        assert_items_equal(xl, (0.0, 5.0))

    def test_ticks_and_labels(self):
        # mock axes, make sure they work as expected
        fake_ax = MockAxes([-1.0, 0.0, 1.0], [-6.0, 0.0, 6.0])
        assert_items_equal(fake_ax.get_xticks(), [-1.0, 0.0, 1.0])
        assert_items_equal(fake_ax.get_yticks(), [-6.0, 0.0, 6.0])
        old_format = self.plotter.label_format
        self.plotter.label_format = "{:4.2f}"
        # explicit tick positions: labels formatted in data coordinates
        xticks, xlabels = self.plotter.ticks_and_labels(
            ticks=[-2.0, -1.0, 0.0, 1.0, 2.0], ax=fake_ax, dof=0
        )
        assert_items_almost_equal(xticks, [-2.0, -1.0, 0.0, 1.0, 2.0])
        assert_items_equal(xlabels, ["-1.00", "-0.50", "0.00", "0.50", "1.00"])
        # ticks=None falls back to the axes' own tick positions
        yticks, ylabels = self.plotter.ticks_and_labels(
            ticks=None, ax=fake_ax, dof=1
        )
        assert_items_almost_equal(yticks, [-6.0, 0.0, 6.0])
        assert_items_equal(ylabels, ["-1.90", "-0.10", "1.70"])
        self.plotter.label_format = old_format
| mit |
rafiqsaleh/VERCE | verce-hpc-pe/src/postproc.py | 2 | 8125 | from verce.GenericPE import GenericPE, NAME
import os
class WatchDirectory(GenericPE):
    """Processing element that lists a directory and emits the path of
    every regular file it contains (subdirectories are skipped).
    Python 2 code."""
    OUTPUT_NAME='output'
    def __init__(self, directory):
        GenericPE.__init__(self)
        self.outputconnections = { WatchDirectory.OUTPUT_NAME : { NAME : WatchDirectory.OUTPUT_NAME }}
        self.directory = directory
    def process(self, inputs):
        # One output tuple per regular file found at process time.
        for dir_entry in os.listdir(self.directory):
            print 'directory: %s' % dir_entry
            dir_entry_path = os.path.join(self.directory, dir_entry)
            if os.path.isfile(dir_entry_path):
                self.write(WatchDirectory.OUTPUT_NAME, [ dir_entry_path ] )
from obspy.core import trace,stream
import numpy
from lxml import etree
from StringIO import StringIO
import pickle
class Specfem3d2Stream(GenericPE):
    """Processing element that converts a SPECFEM3D ASCII seismogram file
    (two columns: time, amplitude) into a pickled ObsPy Stream.
    Python 2 code."""
    INPUT_NAME ='input'
    OUTPUT_NAME='output'
    def __init__(self):
        GenericPE.__init__(self)
        self.inputconnections = { Specfem3d2Stream.INPUT_NAME : { NAME : Specfem3d2Stream.INPUT_NAME }}
        self.outputconnections = { Specfem3d2Stream.OUTPUT_NAME : { NAME : Specfem3d2Stream.OUTPUT_NAME }}
    def num(self,s):
        # Parse a numeric string as int when possible, else float.
        try:
            return int(s)
        except ValueError:
            return float(s)
    def produceStream(self,filepath):
        """Build a one-trace ObsPy Stream from *filepath*, filling trace
        metadata from the filename and from ``self.stationsFile``.

        NOTE(review): assumes ``self.stationsFile`` is set by the caller
        before process() runs -- it is not initialized here; confirm.
        """
        time,data=numpy.loadtxt(filepath,unpack=True)
        head,tail = os.path.split(filepath)
        tr=trace.Trace(data)
        try:
            #assuming that the information are in the filename following the usual convention
            # (STATION.NETWORK.CHANNEL.semX)
            tr.stats['network']=tail.split('.')[1]
            tr.stats['station']=tail.split('.')[0]
            tr.stats['channel']=tail.split('.')[2]
            try:
                # First attempt: stations file is FDSN StationXML.
                doc = etree.parse(StringIO(open(self.stationsFile).read()))
                ns = {"ns": "http://www.fdsn.org/xml/station/1"}
                tr.stats['latitude']= self.num(doc.xpath("//ns:Station[@code='"+tr.stats['station']+"']/ns:Latitude/text()",namespaces=ns)[0])
                tr.stats['longitude']= self.num(doc.xpath("//ns:Station[@code='"+tr.stats['station']+"']/ns:Longitude/text()",namespaces=ns)[0])
            except:
                # Fallback: whitespace-separated station table with a
                # one-line header (columns: name ? lon lat ...).
                with open(self.stationsFile) as f:
                    k=False
                    for line in f:
                        if (k==False):
                            # skip the header line
                            k=True
                        else:
                            station={}
                            l=line.strip().split(" ")
                            if(tr.stats['station']==l[0]):
                                tr.stats['latitude']=float(l[3])
                                tr.stats['longitude']=float(l[2])
        except:
            print traceback.format_exc()
        # tr.stats['network']=self.parameters["network"]
        # tr.stats['station']=self.parameters["station"]
        # tr.stats['channel']=self.parameters["channel"]
        # timing metadata derived from the first two samples
        tr.stats['starttime']=time[0]
        delta=time[1]-time[0]
        tr.stats['delta']=delta #maybe decimal here
        tr.stats['sampling_rate']=round(1./delta,1) #maybe decimal here
        # data type is encoded in the SPECFEM3D file extension
        if filepath.endswith('.semv'):
            tr.stats['type']="velocity"
        if filepath.endswith('.sema'):
            tr.stats['type']='acceleration'
        if filepath.endswith('.semd'):
            tr.stats['type']='displacement'
        st=stream.Stream()
        st+=stream.Stream(tr)
        return st
    def process(self, inputs):
        # Read the input file path, convert it, and emit the pickled stream.
        print 'computing %s' % inputs
        filename = inputs[Specfem3d2Stream.INPUT_NAME][0]
        st=self.produceStream(filename)
        print 'produced stream %s' % st
        return { Specfem3d2Stream.OUTPUT_NAME : [{}, {}, {'data' : pickle.dumps(st)} ] }
from verce.seismo.seismo import SeismoPE
import numpy as np
class WavePlot_INGV(SeismoPE):
    """SeismoPE that renders the traces of ``self.st`` into a single PNG
    (one subplot per trace, shared time axis) and registers the file
    location/format on the stream-item lists. Python 2 code."""
    # headless matplotlib, imported as class attributes
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdt
    def compute(self):
        self.outputdest=self.outputdest+"%s" % (self.parameters["filedestination"],);
        print 'Writing to %s' % self.outputdest
        try:
            if not os.path.exists(self.outputdest):
                os.makedirs(self.outputdest)
        except Exception,e:
            self.error+=str(e)
        # base name: NETWORK.STATION.CHANNEL, plus the data type if known
        name=str(self.st[0].stats.network) + "." + self.st[0].stats.station + "." + self.st[0].stats.channel
        tr = self.st[0]
        try:
            if tr.stats['type']=="velocity":
                name= str(name)+".velocity"
            else:
                if tr.stats['type']=="acceleration":
                    name= str(name)+".acceleration"
                else:
                    if tr.stats['type']=="displacement":
                        name= str(name)+".displacement"
                    else:
                        name= str(name)
        except Exception, err:
            # no 'type' entry in the stats: keep the plain name
            name= str(name)
        self.outputdest=self.outputdest+"/"+name+".png"
        date="Date: " + str(self.st[0].stats.starttime.date)
        fig = WavePlot_INGV.plt.figure()
        fig.set_size_inches(12,6)
        fig.suptitle(name)
        WavePlot_INGV.plt.figtext(0.1, 0.95,date)
        ax = fig.add_subplot(len(self.st),1,1)
        # one subplot per trace, all sharing the x (time) axis
        for i in xrange (len(self.st)):
            WavePlot_INGV.plt.subplot(len(self.st),1,i+1,sharex=ax)
            t=np.linspace(WavePlot_INGV.mdt.date2num(self.st[i].stats.starttime) ,
                          WavePlot_INGV.mdt.date2num(self.st[i].stats.endtime) ,
                          self.st[i].stats.npts)
            WavePlot_INGV.plt.plot(t, self.st[i].data,color='gray')
        ax.set_xlim(WavePlot_INGV.mdt.date2num(self.st[0].stats.starttime), WavePlot_INGV.mdt.date2num(self.st[-1].stats.endtime))
        ax.xaxis.set_major_formatter(WavePlot_INGV.mdt.DateFormatter('%I:%M %p'))
        ax.format_xdata = WavePlot_INGV.mdt.DateFormatter('%I:%M %p')
        fig1 = WavePlot_INGV.plt.gcf()
        WavePlot_INGV.plt.draw()
        fig1.savefig(self.outputdest)
        # record the absolute location and MIME type of the written image
        # NOTE(review): the handle opened here is never closed -- confirm
        # whether os.path.abspath(self.outputdest) could be used instead.
        __file = open(self.outputdest)
        self.streamItemsLocations.append("file://"+socket.gethostname()+os.path.abspath(__file.name))
        self.streamItemsFormat.append("image/png")
from scipy.cluster.vq import whiten
import socket
import traceback
class StreamToSeedFile(SeismoPE):
    """SeismoPE that writes ``self.streams[0]`` to a MiniSEED file (name
    derived from trace metadata) and registers the file location/format
    on the stream-item lists. Python 2 code."""
    def writeToFile(self,stream,location):
        # Write FLOAT32-encoded MiniSEED and return the absolute path.
        # NOTE(review): the handle opened here is never closed -- confirm
        # whether os.path.abspath(location) could be used instead.
        stream.write(location,format='MSEED',encoding='FLOAT32');
        __file = open(location)
        return os.path.abspath(__file.name)
    def compute(self):
        self.outputdest=self.outputdest+"%s" % (self.parameters["filedestination"],);
        # Ensure every trace holds plain float32 data (unmask masked arrays).
        for tr in self.streams[0]:
            try:
                tr.data=tr.data.filled();
            except Exception, err:
                tr.data=np.float32(tr.data);
        # file name: NETWORK.STATION.CHANNEL + extension by data type
        name=str(self.streams[0][0].stats.network) + "." + self.streams[0][0].stats.station + "." + self.streams[0][0].stats.channel
        try:
            # NOTE(review): `tr` is the last trace of the loop above, not
            # necessarily the first one used for `name` -- confirm intent.
            if tr.stats['type']=="velocity":
                self.outfile= str(name)+".seedv"
            else:
                if tr.stats['type']=="acceleration":
                    self.outfile= str(name)+".seeda"
                else:
                    if tr.stats['type']=="displacement":
                        self.outfile= str(name)+".seedd"
                    else:
                        self.outfile= str(name)+".seed"
        except Exception, err:
            self.outfile= str(name)+".seed"
        #self.outputdest=self.outputdest+"/"+folder
        try:
            if not os.path.exists(self.outputdest):
                os.makedirs(self.outputdest)
        except Exception, e:
            print "folder exists: "+self.outputdest
        self.outputdest=self.outputdest+"/"+self.outfile
        path=self.writeToFile(self.streams[0],self.outputdest)
        # record the absolute location and MIME type of the written file
        self.streamItemsLocations.append("file://"+socket.gethostname()+path)
        self.streamItemsFormat.append("application/octet-stream")
| mit |
xuewei4d/scikit-learn | sklearn/mixture/_bayesian_mixture.py | 7 | 33397 | """Bayesian Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import math
import numpy as np
from scipy.special import betaln, digamma, gammaln
from ._base import BaseMixture, _check_shape
from ._gaussian_mixture import _check_precision_matrix
from ._gaussian_mixture import _check_precision_positivity
from ._gaussian_mixture import _compute_log_det_cholesky
from ._gaussian_mixture import _compute_precision_cholesky
from ._gaussian_mixture import _estimate_gaussian_parameters
from ._gaussian_mixture import _estimate_log_gaussian_prob
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
def _log_dirichlet_norm(dirichlet_concentration):
    """Compute the log of the Dirichlet distribution normalization term.

    Parameters
    ----------
    dirichlet_concentration : array-like of shape (n_samples,)
        The parameters values of the Dirichlet distribution.

    Returns
    -------
    log_dirichlet_norm : float
        The log normalization of the Dirichlet distribution.
    """
    # log 1/B(a) = log Gamma(sum_k a_k) - sum_k log Gamma(a_k)
    log_gamma_of_total = gammaln(np.sum(dirichlet_concentration))
    sum_of_log_gammas = np.sum(gammaln(dirichlet_concentration))
    return log_gamma_of_total - sum_of_log_gammas
def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
    """Compute the log of the Wishart distribution normalization term.

    Parameters
    ----------
    degrees_of_freedom : array-like of shape (n_components,)
        The number of degrees of freedom on the covariance Wishart
        distributions.

    log_det_precision_chol : array-like of shape (n_components,)
        The determinant of the precision matrix for each component.

    n_features : int
        The number of features.

    Return
    ------
    log_wishart_norm : array-like of shape (n_components,)
        The log normalization of the Wishart distribution.
    """
    # To simplify the computation we have removed the np.log(np.pi) term
    det_term = degrees_of_freedom * log_det_precisions_chol
    power_of_two_term = degrees_of_freedom * n_features * .5 * math.log(2.)
    # multivariate log-gamma contribution, summed over the feature axis
    gamma_args = .5 * (degrees_of_freedom
                       - np.arange(n_features)[:, np.newaxis])
    gamma_term = np.sum(gammaln(gamma_args), 0)
    return -(det_term + power_of_two_term + gamma_term)
class BayesianGaussianMixture(BaseMixture):
"""Variational Bayesian estimation of a Gaussian mixture.
This class allows to infer an approximate posterior distribution over the
parameters of a Gaussian mixture distribution. The effective number of
components can be inferred from the data.
This class implements two types of prior for the weights distribution: a
finite mixture model with Dirichlet distribution and an infinite mixture
model with the Dirichlet Process. In practice Dirichlet Process inference
algorithm is approximated and uses a truncated distribution with a fixed
maximum number of components (called the Stick-breaking representation).
The number of components actually used almost always depends on the data.
.. versionadded:: 0.18
Read more in the :ref:`User Guide <bgmm>`.
Parameters
----------
n_components : int, default=1
The number of mixture components. Depending on the data and the value
of the `weight_concentration_prior` the model can decide to not use
all the components by setting some component `weights_` to values very
close to zero. The number of effective components is therefore smaller
than n_components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
String describing the type of covariance parameters to use.
Must be one of::
'full' (each component has its own general covariance matrix),
'tied' (all components share the same general covariance matrix),
'diag' (each component has its own diagonal covariance matrix),
'spherical' (each component has its own single variance).
tol : float, default=1e-3
The convergence threshold. EM iterations will stop when the
lower bound average gain on the likelihood (of the training data with
respect to the model) is below this threshold.
reg_covar : float, default=1e-6
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, default=100
The number of EM iterations to perform.
n_init : int, default=1
The number of initializations to perform. The result with the highest
lower bound value on the likelihood is kept.
init_params : {'kmeans', 'random'}, default='kmeans'
The method used to initialize the weights, the means and the
covariances.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weight_concentration_prior_type : str, default='dirichlet_process'
String describing the type of the weight concentration prior.
Must be one of::
'dirichlet_process' (using the Stick-breaking representation),
'dirichlet_distribution' (can favor more uniform weights).
weight_concentration_prior : float | None, default=None.
The dirichlet concentration of each component on the weight
distribution (Dirichlet). This is commonly called gamma in the
literature. The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
mixture weights simplex. The value of the parameter must be greater
than 0. If it is None, it's set to ``1. / n_components``.
mean_precision_prior : float | None, default=None.
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed. Larger
values concentrate the cluster means around `mean_prior`.
The value of the parameter must be greater than 0.
If it is None, it is set to 1.
mean_prior : array-like, shape (n_features,), default=None.
The prior on the mean distribution (Gaussian).
If it is None, it is set to the mean of X.
degrees_of_freedom_prior : float | None, default=None.
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). If it is None, it's set to `n_features`.
covariance_prior : float or array-like, default=None.
The prior on the covariance distribution (Wishart).
If it is None, the emiprical covariance prior is initialized using the
covariance of X. The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
random_state : int, RandomState instance or None, default=None
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
See :term:`the Glossary <warm_start>`.
verbose : int, default=0
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default=10
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like of shape (n_components,)
The weights of each mixture components.
means_ : array-like of shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of inference to reach the
convergence.
lower_bound_ : float
Lower bound value on the likelihood (of the training data with
respect to the model) of the best fit of inference.
weight_concentration_prior_ : tuple or float
The dirichlet concentration of each component on the weight
distribution (Dirichlet). The type depends on
``weight_concentration_prior_type``::
(float, float) if 'dirichlet_process' (Beta parameters),
float if 'dirichlet_distribution' (Dirichlet parameters).
The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
simplex.
weight_concentration_ : array-like of shape (n_components,)
The dirichlet concentration of each component on the weight
distribution (Dirichlet).
mean_precision_prior_ : float
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed.
Larger values concentrate the cluster means around `mean_prior`.
If mean_precision_prior is set to None, `mean_precision_prior_` is set
to 1.
mean_precision_ : array-like of shape (n_components,)
The precision of each components on the mean distribution (Gaussian).
mean_prior_ : array-like of shape (n_features,)
The prior on the mean distribution (Gaussian).
degrees_of_freedom_prior_ : float
The prior of the number of degrees of freedom on the covariance
distributions (Wishart).
degrees_of_freedom_ : array-like of shape (n_components,)
The number of degrees of freedom of each components in the model.
covariance_prior_ : float or array-like
The prior on the covariance distribution (Wishart).
The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
Examples
--------
>>> import numpy as np
>>> from sklearn.mixture import BayesianGaussianMixture
>>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]])
>>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X)
>>> bgm.means_
array([[2.49... , 2.29...],
[8.45..., 4.52... ]])
>>> bgm.predict([[0, 0], [9, 3]])
array([0, 1])
See Also
--------
GaussianMixture : Finite Gaussian mixture fit with EM.
References
----------
.. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine
learning". Vol. 4 No. 4. New York: Springer.
<https://www.springer.com/kr/book/9780387310732>`_
.. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for
Graphical Models". In Advances in Neural Information Processing
Systems 12.
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.36.2841&rep=rep1&type=pdf>`_
.. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational
inference for Dirichlet process mixtures". Bayesian analysis 1.1
<https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_
"""
@_deprecate_positional_args
def __init__(self, *, n_components=1, covariance_type='full', tol=1e-3,
             reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
             weight_concentration_prior_type='dirichlet_process',
             weight_concentration_prior=None,
             mean_precision_prior=None, mean_prior=None,
             degrees_of_freedom_prior=None, covariance_prior=None,
             random_state=None, warm_start=False, verbose=0,
             verbose_interval=10):
    """Store the variational-inference hyper-parameters unmodified.

    Options shared with the EM mixture (tolerance, iteration counts,
    initialisation, verbosity) are forwarded to the base class; the prior
    hyper-parameters specific to the variational Bayesian model are kept
    as-is here.  Per scikit-learn convention ``__init__`` performs no
    validation — defaults for the ``None`` priors are derived from the
    data later, in ``_check_parameters``.
    """
    super().__init__(
        n_components=n_components, tol=tol, reg_covar=reg_covar,
        max_iter=max_iter, n_init=n_init, init_params=init_params,
        random_state=random_state, warm_start=warm_start,
        verbose=verbose, verbose_interval=verbose_interval)

    # Raw priors; ``None`` means "compute a data-driven default at fit time".
    self.covariance_type = covariance_type
    self.weight_concentration_prior_type = weight_concentration_prior_type
    self.weight_concentration_prior = weight_concentration_prior
    self.mean_precision_prior = mean_precision_prior
    self.mean_prior = mean_prior
    self.degrees_of_freedom_prior = degrees_of_freedom_prior
    self.covariance_prior = covariance_prior
def _check_parameters(self, X):
"""Check that the parameters are well defined.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% self.covariance_type)
if (self.weight_concentration_prior_type not in
['dirichlet_process', 'dirichlet_distribution']):
raise ValueError(
"Invalid value for 'weight_concentration_prior_type': %s "
"'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% self.weight_concentration_prior_type)
self._check_weights_parameters()
self._check_means_parameters(X)
self._check_precision_parameters(X)
self._checkcovariance_prior_parameter(X)
def _check_weights_parameters(self):
"""Check the parameter of the Dirichlet distribution."""
if self.weight_concentration_prior is None:
self.weight_concentration_prior_ = 1. / self.n_components
elif self.weight_concentration_prior > 0.:
self.weight_concentration_prior_ = (
self.weight_concentration_prior)
else:
raise ValueError("The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% self.weight_concentration_prior)
def _check_means_parameters(self, X):
"""Check the parameters of the Gaussian distribution.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.mean_precision_prior is None:
self.mean_precision_prior_ = 1.
elif self.mean_precision_prior > 0.:
self.mean_precision_prior_ = self.mean_precision_prior
else:
raise ValueError("The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% self.mean_precision_prior)
if self.mean_prior is None:
self.mean_prior_ = X.mean(axis=0)
else:
self.mean_prior_ = check_array(self.mean_prior,
dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.mean_prior_, (n_features, ), 'means')
def _check_precision_parameters(self, X):
"""Check the prior parameters of the precision distribution.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.degrees_of_freedom_prior is None:
self.degrees_of_freedom_prior_ = n_features
elif self.degrees_of_freedom_prior > n_features - 1.:
self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
else:
raise ValueError("The parameter 'degrees_of_freedom_prior' "
"should be greater than %d, but got %.3f."
% (n_features - 1, self.degrees_of_freedom_prior))
def _checkcovariance_prior_parameter(self, X):
"""Check the `covariance_prior_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.covariance_prior is None:
self.covariance_prior_ = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()
}[self.covariance_type]
elif self.covariance_type in ['full', 'tied']:
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features, n_features),
'%s covariance_prior' % self.covariance_type)
_check_precision_matrix(self.covariance_prior_,
self.covariance_type)
elif self.covariance_type == 'diag':
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features,),
'%s covariance_prior' % self.covariance_type)
_check_precision_positivity(self.covariance_prior_,
self.covariance_type)
# spherical case
elif self.covariance_prior > 0.:
self.covariance_prior_ = self.covariance_prior
else:
raise ValueError("The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% self.covariance_prior)
def _initialize(self, X, resp):
    """Initialization of the mixture parameters.

    Computes the sufficient statistics (counts, means, scatter) of the
    initial responsibilities and runs one variational M-step to seed all
    posterior parameters.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    resp : array-like of shape (n_samples, n_components)
    """
    nk, xk, sk = _estimate_gaussian_parameters(X, resp, self.reg_covar,
                                               self.covariance_type)

    self._estimate_weights(nk)
    self._estimate_means(nk, xk)
    self._estimate_precisions(nk, xk, sk)
def _estimate_weights(self, nk):
"""Estimate the parameters of the Dirichlet distribution.
Parameters
----------
nk : array-like of shape (n_components,)
"""
if self.weight_concentration_prior_type == 'dirichlet_process':
# For dirichlet process weight_concentration will be a tuple
# containing the two parameters of the beta distribution
self.weight_concentration_ = (
1. + nk,
(self.weight_concentration_prior_ +
np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))))
else:
# case Variationnal Gaussian mixture with dirichlet distribution
self.weight_concentration_ = self.weight_concentration_prior_ + nk
def _estimate_means(self, nk, xk):
"""Estimate the parameters of the Gaussian distribution.
Parameters
----------
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
"""
self.mean_precision_ = self.mean_precision_prior_ + nk
self.means_ = ((self.mean_precision_prior_ * self.mean_prior_ +
nk[:, np.newaxis] * xk) /
self.mean_precision_[:, np.newaxis])
def _estimate_precisions(self, nk, xk, sk):
    """Estimate the precisions parameters of the precision distribution.

    Parameters
    ----------
    nk : array-like of shape (n_components,)

    xk : array-like of shape (n_components, n_features)

    sk : array-like
        The shape depends of `covariance_type`:
        'full' : (n_components, n_features, n_features)
        'tied' : (n_features, n_features)
        'diag' : (n_components, n_features)
        'spherical' : (n_components,)
    """
    # Dispatch to the Wishart update matching the covariance
    # parameterisation; each branch sets ``degrees_of_freedom_`` and
    # ``covariances_``.
    {"full": self._estimate_wishart_full,
     "tied": self._estimate_wishart_tied,
     "diag": self._estimate_wishart_diag,
     "spherical": self._estimate_wishart_spherical
     }[self.covariance_type](nk, xk, sk)

    # Cache the Cholesky factors of the precisions for fast log-prob
    # evaluation later.
    self.precisions_cholesky_ = _compute_precision_cholesky(
        self.covariances_, self.covariance_type)
def _estimate_wishart_full(self, nk, xk, sk):
    """Estimate the full Wishart distribution parameters.

    Parameters
    ----------
    nk : array-like of shape (n_components,)

    xk : array-like of shape (n_components, n_features)

    sk : array-like of shape (n_components, n_features, n_features)
    """
    _, n_features = xk.shape

    # Warning : in some Bishop book, there is a typo on the formula 10.63
    # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
    # the correct formula
    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk

    self.covariances_ = np.empty((self.n_components, n_features,
                                  n_features))

    for k in range(self.n_components):
        diff = xk[k] - self.mean_prior_
        # Posterior scale: prior scale + weighted scatter of component k
        # + a rank-one correction accounting for the mean uncertainty.
        self.covariances_[k] = (self.covariance_prior_ + nk[k] * sk[k] +
                                nk[k] * self.mean_precision_prior_ /
                                self.mean_precision_[k] * np.outer(diff,
                                                                   diff))

    # Contrary to the original bishop book, we normalize the covariances
    self.covariances_ /= (
        self.degrees_of_freedom_[:, np.newaxis, np.newaxis])
def _estimate_wishart_tied(self, nk, xk, sk):
    """Estimate the tied Wishart distribution parameters.

    Parameters
    ----------
    nk : array-like of shape (n_components,)

    xk : array-like of shape (n_components, n_features)

    sk : array-like of shape (n_features, n_features)
    """
    _, n_features = xk.shape

    # Warning : in some Bishop book, there is a typo on the formula 10.63
    # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
    # is the correct formula
    self.degrees_of_freedom_ = (
        self.degrees_of_freedom_prior_ + nk.sum() / self.n_components)

    diff = xk - self.mean_prior_
    # Single shared scale matrix: per-component contributions are averaged
    # over the components (hence the divisions by ``n_components``).
    self.covariances_ = (
        self.covariance_prior_ + sk * nk.sum() / self.n_components +
        self.mean_precision_prior_ / self.n_components * np.dot(
            (nk / self.mean_precision_) * diff.T, diff))

    # Contrary to the original bishop book, we normalize the covariances
    self.covariances_ /= self.degrees_of_freedom_
def _estimate_wishart_diag(self, nk, xk, sk):
    """Estimate the diag Wishart distribution parameters.

    Parameters
    ----------
    nk : array-like of shape (n_components,)

    xk : array-like of shape (n_components, n_features)

    sk : array-like of shape (n_components, n_features)
    """
    _, n_features = xk.shape

    # Warning : in some Bishop book, there is a typo on the formula 10.63
    # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
    # is the correct formula
    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk

    diff = xk - self.mean_prior_
    # Elementwise analogue of the full update: ``diff ** 2`` replaces the
    # outer product since only the diagonal is tracked.
    self.covariances_ = (
        self.covariance_prior_ + nk[:, np.newaxis] * (
            sk + (self.mean_precision_prior_ /
                  self.mean_precision_)[:, np.newaxis] * np.square(diff)))

    # Contrary to the original bishop book, we normalize the covariances
    self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
def _estimate_wishart_spherical(self, nk, xk, sk):
    """Estimate the spherical Wishart distribution parameters.

    Parameters
    ----------
    nk : array-like of shape (n_components,)

    xk : array-like of shape (n_components, n_features)

    sk : array-like of shape (n_components,)
    """
    _, n_features = xk.shape

    # Warning : in some Bishop book, there is a typo on the formula 10.63
    # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
    # is the correct formula
    self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk

    diff = xk - self.mean_prior_
    # Scalar analogue of the diag update: the squared deviations are
    # averaged over the features.
    self.covariances_ = (
        self.covariance_prior_ + nk * (
            sk + self.mean_precision_prior_ / self.mean_precision_ *
            np.mean(np.square(diff), 1)))

    # Contrary to the original bishop book, we normalize the covariances
    self.covariances_ /= self.degrees_of_freedom_
def _m_step(self, X, log_resp):
    """M step.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    log_resp : array-like of shape (n_samples, n_components)
        Logarithm of the posterior probabilities (or responsibilities) of
        the point of each sample in X.
    """
    n_samples, _ = X.shape

    # Sufficient statistics of the responsibilities, followed by one
    # variational update of each posterior family.
    nk, xk, sk = _estimate_gaussian_parameters(
        X, np.exp(log_resp), self.reg_covar, self.covariance_type)
    self._estimate_weights(nk)
    self._estimate_means(nk, xk)
    self._estimate_precisions(nk, xk, sk)
def _estimate_log_weights(self):
    """Return the expected log mixture weights under the weight posterior."""
    if self.weight_concentration_prior_type == 'dirichlet_process':
        # Stick-breaking representation: E[log v_k] plus the accumulated
        # E[log(1 - v_j)] of all preceding sticks.
        digamma_sum = digamma(self.weight_concentration_[0] +
                              self.weight_concentration_[1])
        digamma_a = digamma(self.weight_concentration_[0])
        digamma_b = digamma(self.weight_concentration_[1])
        return (digamma_a - digamma_sum +
                np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])))
    else:
        # case: variational Gaussian mixture with Dirichlet distribution
        return (digamma(self.weight_concentration_) -
                digamma(np.sum(self.weight_concentration_)))
def _estimate_log_prob(self, X):
    """Return the expected log-probability of each sample per component."""
    _, n_features = X.shape
    # We remove `n_features * np.log(self.degrees_of_freedom_)` because
    # the precision matrix is normalized
    log_gauss = (_estimate_log_gaussian_prob(
        X, self.means_, self.precisions_cholesky_, self.covariance_type) -
        .5 * n_features * np.log(self.degrees_of_freedom_))

    # E[log |Lambda|] contribution from the Wishart posterior (sum of
    # digammas over the multivariate degrees of freedom).
    log_lambda = n_features * np.log(2.) + np.sum(digamma(
        .5 * (self.degrees_of_freedom_ -
              np.arange(0, n_features)[:, np.newaxis])), 0)

    return log_gauss + .5 * (log_lambda -
                             n_features / self.mean_precision_)
def _compute_lower_bound(self, log_resp, log_prob_norm):
    """Estimate the lower bound of the model.

    The lower bound on the likelihood (of the training data with respect to
    the model) is used to detect the convergence and has to increase at
    each iteration.

    Parameters
    ----------
    log_resp : array, shape (n_samples, n_components)
        Logarithm of the posterior probabilities (or responsibilities) of
        the point of each sample in X.

    log_prob_norm : float
        Logarithm of the probability of each sample in X.
        NOTE(review): accepted for API symmetry but unused in this body.

    Returns
    -------
    lower_bound : float
    """
    # Contrary to the original formula, we have done some simplification
    # and removed all the constant terms.
    n_features, = self.mean_prior_.shape

    # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
    # because the precision matrix is normalized.
    log_det_precisions_chol = (_compute_log_det_cholesky(
        self.precisions_cholesky_, self.covariance_type, n_features) -
        .5 * n_features * np.log(self.degrees_of_freedom_))

    if self.covariance_type == 'tied':
        log_wishart = self.n_components * np.float64(_log_wishart_norm(
            self.degrees_of_freedom_, log_det_precisions_chol, n_features))
    else:
        log_wishart = np.sum(_log_wishart_norm(
            self.degrees_of_freedom_, log_det_precisions_chol, n_features))

    if self.weight_concentration_prior_type == 'dirichlet_process':
        log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                         self.weight_concentration_[1]))
    else:
        log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)

    # Negative responsibilities entropy minus the normalisation terms of
    # the weight and precision posteriors.
    return (-np.sum(np.exp(log_resp) * log_resp) -
            log_wishart - log_norm_weight -
            0.5 * n_features * np.sum(np.log(self.mean_precision_)))
def _get_parameters(self):
return (self.weight_concentration_,
self.mean_precision_, self.means_,
self.degrees_of_freedom_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
    """Restore posterior parameters and refresh the derived ``weights_``
    and ``precisions_`` attributes from them."""
    (self.weight_concentration_, self.mean_precision_, self.means_,
     self.degrees_of_freedom_, self.covariances_,
     self.precisions_cholesky_) = params

    # Weights computation
    if self.weight_concentration_prior_type == "dirichlet_process":
        weight_dirichlet_sum = (self.weight_concentration_[0] +
                                self.weight_concentration_[1])
        tmp = self.weight_concentration_[1] / weight_dirichlet_sum
        # Expected stick-breaking weights, then renormalised to sum to 1.
        self.weights_ = (
            self.weight_concentration_[0] / weight_dirichlet_sum *
            np.hstack((1, np.cumprod(tmp[:-1]))))
        self.weights_ /= np.sum(self.weights_)
    else:
        self.weights_ = (self.weight_concentration_ /
                         np.sum(self.weight_concentration_))

    # Precisions matrices computation (precision = inverse covariance,
    # reconstructed from the stored Cholesky factors).
    if self.covariance_type == 'full':
        self.precisions_ = np.array([
            np.dot(prec_chol, prec_chol.T)
            for prec_chol in self.precisions_cholesky_])
    elif self.covariance_type == 'tied':
        self.precisions_ = np.dot(self.precisions_cholesky_,
                                  self.precisions_cholesky_.T)
    else:
        # 'diag' and 'spherical': factors are elementwise square roots.
        self.precisions_ = self.precisions_cholesky_ ** 2
| bsd-3-clause |
abulak/TDA-Cause-Effect-Pairs | identify_outliers.py | 1 | 6251 | import os
import sys
import numpy as np
import numpy.ma as ma
import logging
from sklearn.neighbors import NearestNeighbors
import scipy.spatial as spsp
def standardise(points):
    """
    Standardise points in place: zero mean and (for non-constant columns)
    unit standard deviation in each dimension.

    Constant columns (std == 0) are only centred, never divided, matching
    the original per-column guard.

    :param points: np.array of shape (n_points, n_dims); modified in place
    :return: the same np.array, for call-chaining convenience
    """
    # Vectorised over columns instead of a Python loop; std is computed
    # before centring, but std is translation-invariant so order is moot.
    std = points.std(axis=0)
    points -= points.mean(axis=0)
    nonconstant = std != 0
    points[:, nonconstant] /= std[nonconstant]
    return points
class OutlierRemoval:
    """Detect and persist outliers in a point cloud loaded from
    ``points.std`` in the current working directory.

    Attributes set in ``__init__``:
      current_dir    directory the points were loaded from
      name           short identifier (last 8 chars of ``current_dir``),
                     also used as the log-file stem
      points         standardised points (np.array, shape (n, dim))
      model          detection strategy: 'knn', 'knn_old' or 'all'
      dimension      number of coordinates per point
      n_of_outliers  number of outliers to remove (15% of the points)

    ``find_outliers`` additionally sets ``outliers`` — the list of indices
    (into ``points``) of the detected outliers; ``save_outliers`` writes
    them to ``outliers.<model>``.
    """

    def __init__(self, model="knn"):
        # NOTE: performs filesystem I/O — reads 'points.std' from the CWD
        # and configures logging to '<name>.log'.
        self.current_dir = os.getcwd()
        self.name = self.current_dir[-8:]
        logging.basicConfig(filename=self.name+".log", level=logging.INFO,
                            format='%(asctime)s - %(levelname)s - %(message)s')
        points = np.loadtxt(os.path.join(self.current_dir, 'points.std'))
        self.points = standardise(points)
        self.model = model
        self.dimension = self.points.shape[1]
        # Fixed budget: 15% of the points are treated as outliers.
        self.n_of_outliers = int(15 * self.points.shape[0] / 100.0)

    @staticmethod
    def find_single_outlier_knn(points, k_nearest):
        # The outlier is the point with the largest summed distance to its
        # k nearest neighbours (column 0 is the point itself, hence 1:).
        neigh = NearestNeighbors()
        neigh.fit(points)
        distances, indices = neigh.kneighbors(points, k_nearest)
        distances_partial = distances[:, 1:k_nearest+1]
        distances_vector = distances_partial.sum(1)
        outlier = distances_vector.argmax()
        return outlier

    @staticmethod
    def compute_offset(outliers, i):
        # Translate the index of the i-th outlier (found in the shrunken
        # array after i removals) back towards an index in the original
        # array by counting earlier removals at or below it.
        offset = 0
        for j, x in reversed(list(enumerate(outliers[:i]))):
            if x <= outliers[i]+offset:
                offset += 1
        return offset

    def find_outliers_knn(self, k_nearest):
        """Iteratively remove the worst kNN outlier, re-standardising the
        remaining points each round; returns original-array indices."""
        masked_points = ma.MaskedArray(self.points)
        shape = self.points.shape
        outliers = []
        for i in range(self.n_of_outliers):
            # Drop previously-masked entries and re-standardise before
            # scoring the next outlier.
            pts = masked_points.compressed().reshape((shape[0] - i, shape[1]))
            pts = standardise(pts)
            out_index = self.find_single_outlier_knn(pts, k_nearest)
            outliers.append(out_index)
            masked_points = ma.MaskedArray(pts)
            masked_points[out_index] = ma.masked
            logging.debug("%d of %d", out_index, self.n_of_outliers)

        # Map the shrunken-array indices back to the original indexing.
        offsets = [self.compute_offset(outliers, i)
                   for i in range(len(outliers))]
        true_outliers = [out + offsets[i] for i, out in enumerate(outliers)]
        if len(true_outliers) != len(set(true_outliers)):
            logging.warning("There are some duplicates in the outliers! %s",
                            str(true_outliers))
        # assert len(true_outliers) == len(set(true_outliers)), \
        #     "There are some duplicates in the outliers!"
        return true_outliers

    def find_outliers_knn_old(self, k_nearest):
        """Legacy kNN variant operating on one precomputed neighbour table
        (no re-standardisation between removals)."""
        neigh = NearestNeighbors()
        neigh.fit(self.points)
        distances, indices = neigh.kneighbors(self.points,
                                              k_nearest + self.n_of_outliers)
        outliers = []
        for out in range(self.n_of_outliers):
            logging.debug("%d of %d", out, self.n_of_outliers)
            distances_partial = distances[:, 1:k_nearest+1]
            distances_vector = distances_partial.sum(1)
            outlier = distances_vector.argmax()
            outliers.append(outlier)
            # Zero the chosen row so it can never be picked again.
            distances[outlier] = np.zeros(k_nearest + self.n_of_outliers)
            for i, row in enumerate(indices):
                if outlier in row:
                    # Sentinel 1000 + re-sort pushes the removed neighbour
                    # out of the k-nearest window of row i.
                    distances[i][np.where(row == outlier)[0][0]] = 1000
                    distances[i].sort()
        return outliers

    def find_outliers_all(self):
        """Score each point by its summed distance to all other points,
        greedily extracting the maximum and discounting its contribution."""
        distances_matrix = spsp.distance_matrix(self.points, self.points)
        outliers = []
        distances_vector = ma.masked_array(np.sum(distances_matrix, axis=1))
        for out in range(self.n_of_outliers):
            outlier = distances_vector.argmax()
            logging.debug("%d of %d", self.n_of_outliers, out)
            outliers.append(outlier)
            # Remove the chosen point's distances from the remaining scores
            # and mask it so argmax cannot return it again.
            distances_vector -= distances_matrix[:, outlier]
            distances_vector[outlier] = ma.masked
        return outliers

    def find_outliers(self):
        """Dispatch to the detector selected by ``self.model`` and store
        the resulting indices (into ``self.points``) in ``self.outliers``.

        NOTE(review): an unknown model only logs a warning and leaves
        ``self.outliers`` unset, so a later ``save_outliers`` would raise
        AttributeError — confirm this is intended.
        """
        logging.info("Finding %s %d outliers in %s", self.model,
                     self.n_of_outliers, self.name)
        # Neighbourhood size scales with the data: 2% of the points + 5.
        nearest_neighbours = int(2 * self.points.shape[0] / 100) + 5
        if self.model == 'all':  # outlier based on max distance to all others
            self.outliers = self.find_outliers_all()
        elif self.model == 'knn_old':
            self.outliers = self.find_outliers_knn_old(nearest_neighbours)
        elif self.model == 'knn':
            self.outliers = self.find_outliers_knn(nearest_neighbours)
        else:
            logging.warning('Unknown model of outliers! Available are: all, '
                            'knn_old, knn')
        logging.info('Done with outliers!')

    def save_outliers(self):
        """Write the detected outlier indices to ``outliers.<model>`` in
        the working directory, one integer per line."""
        np.savetxt(os.path.join(self.current_dir, "outliers." + self.model),
                   np.asarray(self.outliers, dtype=int), fmt='%d')
def workflow(model):
    """Run the full pipeline for the current directory: load the points,
    detect outliers with the given model, and persist the indices."""
    remover = OutlierRemoval(model)
    remover.find_outliers()
    remover.save_outliers()
if __name__ == "__main__":
    # Expect exactly one CLI argument: the outlier model ('knn', 'knn_old'
    # or 'all'); anything else prints usage and exits normally.
    if len(sys.argv) == 2:
        model = sys.argv[1]
        workflow(model)
    else:
        print("Usage: identify_outliers.py $MODEL")
| gpl-2.0 |
poryfly/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
# NOTE(review): '--lsa' uses type="int" while '--n-features' uses type=int;
# optparse accepts both forms, but they should probably be consistent.
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
                   " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")

print(__doc__)
op.print_help()

(opts, args) = op.parse_args()
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)


###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)

print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()

# Ground-truth labels, used only for the evaluation metrics below; the
# number of clusters is set to the number of true categories.
labels = dataset.target
true_k = np.unique(labels).shape[0]

print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)

print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()

if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)

    X = lsa.fit_transform(X)

    print("done in %fs" % (time() - t0))

    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))

    print()


###############################################################################
# Do the actual clustering

if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)

print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()

# Label-based metrics compare cluster assignments against the ground
# truth; the silhouette score is computed from distances only.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))

print()

# Hashing destroys the feature-index -> term mapping, so per-cluster terms
# can only be reported for the in-memory vocabulary vectorizers.
if not opts.use_hashing:
    print("Top terms per cluster:")

    if opts.n_components:
        # Map the LSA-space centroids back to term space before ranking.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]

    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
mjlong/openmc | tests/test_mgxs_library_nuclides/test_mgxs_library_nuclides.py | 2 | 2643 | #!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
    """Regression-test harness exercising nuclide-resolved multi-group
    cross-section (MGXS) generation with the openmc.mgxs module.

    Extends the parent harness by attaching a two-group MGXS tally library
    to the inputs, then digesting the resulting cross sections from the
    statepoint into a comparable string.
    """

    def _build_inputs(self):
        # The openmc.mgxs module needs a summary.h5 file
        self._input_set.settings.output = {'summary': True}

        # Generate inputs using parent class routine
        super(MGXSTestHarness, self)._build_inputs()

        # Initialize a two-group structure
        # (edges presumably in MeV: thermal cutoff 0.625 eV — TODO confirm
        # units against the openmc version in use)
        energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625e-6, 20.])

        # Initialize MGXS Library for a few cross section types
        self.mgxs_lib = openmc.mgxs.Library(self._input_set.geometry.geometry)
        self.mgxs_lib.by_nuclide = True
        self.mgxs_lib.mgxs_types = ['transport', 'nu-fission',
                                    'nu-scatter matrix', 'chi']
        self.mgxs_lib.energy_groups = energy_groups
        self.mgxs_lib.domain_type = 'material'
        self.mgxs_lib.build_library()

        # Initialize a tallies file
        self._input_set.tallies = openmc.TalliesFile()
        self.mgxs_lib.add_to_tallies_file(self._input_set.tallies, merge=False)
        self._input_set.tallies.export_to_xml()

    def _get_results(self, hash_output=False):
        """Digest info in the statepoint and return as a string."""

        # Read the statepoint file.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)

        # Read the summary file.
        summary = glob.glob(os.path.join(os.getcwd(), 'summary.h5'))[0]
        su = openmc.Summary(summary)
        sp.link_with_summary(su)

        # Load the MGXS library from the statepoint
        self.mgxs_lib.load_from_statepoint(sp)

        # Build a string from Pandas Dataframe for each MGXS
        outstr = ''
        for domain in self.mgxs_lib.domains:
            for mgxs_type in self.mgxs_lib.mgxs_types:
                mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
                df = mgxs.get_pandas_dataframe()
                outstr += df.to_string()

        # Hash the results if necessary
        # (keeps the reference file small for large dataframes)
        if hash_output:
            sha512 = hashlib.sha512()
            sha512.update(outstr.encode('utf-8'))
            outstr = sha512.hexdigest()

        return outstr

    def _cleanup(self):
        # Remove the tallies.xml generated by _build_inputs in addition to
        # whatever the parent harness cleans up.
        super(MGXSTestHarness, self)._cleanup()
        f = os.path.join(os.getcwd(), 'tallies.xml')
        if os.path.exists(f): os.remove(f)
# Run the MGXS-by-nuclide regression test when executed as a script.
if __name__ == '__main__':
    harness = MGXSTestHarness('statepoint.10.*', True)
    harness.main()
| mit |
PaddlePaddle/models | PaddleCV/gan/trainer/CGAN.py | 1 | 8924 | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from network.CGAN_network import CGAN_model
from util import utility
import sys
import six
import os
import numpy as np
import time
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import paddle.fluid as fluid
class GTrainer():
    """Generator trainer: builds the G network, its loss, and its optimizer."""

    def __init__(self, input, conditions, cfg):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            model = CGAN_model(cfg.batch_size)
            self.fake = model.network_G(input, conditions, name="G")
            # Clone before attaching the discriminator so inference runs
            # the generator only.
            self.infer_program = self.program.clone(for_test=True)
            d_fake = model.network_D(self.fake, conditions, name="D")
            batch = fluid.layers.shape(input)[0]
            # The generator wants D to score its fakes as real (1.0).
            ones = fluid.layers.fill_constant(
                dtype='float32', shape=[batch, 1], value=1.0)
            self.g_loss = fluid.layers.reduce_mean(
                fluid.layers.sigmoid_cross_entropy_with_logits(
                    x=d_fake, label=ones))
            # Restrict updates to parameters belonging to the generator.
            g_vars = [var.name for var in self.program.list_vars()
                      if fluid.io.is_parameter(var)
                      and var.name.startswith("G")]
            optimizer = fluid.optimizer.Adam(
                learning_rate=cfg.learning_rate, beta1=0.5, name="net_G")
            optimizer.minimize(self.g_loss, parameter_list=g_vars)
class DTrainer():
    """Discriminator trainer: builds the D network, its loss, and optimizer."""

    def __init__(self, input, conditions, labels, cfg):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            model = CGAN_model(cfg.batch_size)
            d_logit = model.network_D(input, conditions, name="D")
            # Sigmoid cross-entropy against the caller-supplied real/fake
            # labels.
            self.d_loss = fluid.layers.reduce_mean(
                fluid.layers.sigmoid_cross_entropy_with_logits(
                    x=d_logit, label=labels))
            # Restrict updates to parameters belonging to the discriminator.
            d_vars = [var.name for var in self.program.list_vars()
                      if fluid.io.is_parameter(var)
                      and var.name.startswith("D")]
            optimizer = fluid.optimizer.Adam(
                learning_rate=cfg.learning_rate, beta1=0.5, name="net_D")
            optimizer.minimize(self.d_loss, parameter_list=d_vars)
class CGAN(object):
    """Conditional GAN trainer for 784-pixel (MNIST-style) images."""

    def add_special_args(self, parser):
        """Register CGAN-specific command line arguments."""
        parser.add_argument(
            '--noise_size', type=int, default=100, help="the noise dimension")
        return parser

    def __init__(self, cfg=None, train_reader=None):
        self.cfg = cfg
        self.train_reader = train_reader

    def build_model(self):
        """Build G/D programs, then run the alternating training loop."""
        img = fluid.data(name='img', shape=[None, 784], dtype='float32')
        condition = fluid.data(
            name='condition', shape=[None, 1], dtype='float32')
        noise = fluid.data(
            name='noise', shape=[None, self.cfg.noise_size], dtype='float32')
        label = fluid.data(name='label', shape=[None, 1], dtype='float32')

        g_trainer = GTrainer(noise, condition, self.cfg)
        d_trainer = DTrainer(img, condition, label, self.cfg)

        # Prepare the execution environment.
        place = fluid.CUDAPlace(0) if self.cfg.use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        # Fixed noise vector, reused every test dump so progress is
        # visually comparable across epochs.
        const_n = np.random.uniform(
            low=-1.0, high=1.0,
            size=[self.cfg.batch_size, self.cfg.noise_size]).astype('float32')

        if self.cfg.init_model:
            utility.init_checkpoints(self.cfg, g_trainer, "net_G")
            utility.init_checkpoints(self.cfg, d_trainer, "net_D")

        # Memory optimization: allow in-place execution.
        build_strategy = fluid.BuildStrategy()
        build_strategy.enable_inplace = True

        g_trainer_program = fluid.CompiledProgram(
            g_trainer.program).with_data_parallel(
                loss_name=g_trainer.g_loss.name,
                build_strategy=build_strategy)
        d_trainer_program = fluid.CompiledProgram(
            d_trainer.program).with_data_parallel(
                loss_name=d_trainer.d_loss.name,
                build_strategy=build_strategy)

        if self.cfg.run_test:
            image_path = os.path.join(self.cfg.output, 'test')
            if not os.path.exists(image_path):
                os.makedirs(image_path)

        t_time = 0
        for epoch_id in range(self.cfg.epoch):
            for batch_id, data in enumerate(self.train_reader()):
                # Skip ragged final batches.
                if len(data) != self.cfg.batch_size:
                    continue

                noise_data = np.random.uniform(
                    low=-1.0,
                    high=1.0,
                    size=[self.cfg.batch_size,
                          self.cfg.noise_size]).astype('float32')
                real_image = np.array(list(map(lambda x: x[0],
                                               data))).reshape(
                    [-1, 784]).astype('float32')
                condition_data = np.array([x[1] for x in data]).reshape(
                    [-1, 1]).astype('float32')
                real_label = np.ones(
                    shape=[real_image.shape[0], 1], dtype='float32')
                fake_label = np.zeros(
                    shape=[real_image.shape[0], 1], dtype='float32')
                s_time = time.time()

                # Generate fakes for the discriminator step.
                generate_image = exe.run(
                    g_trainer.infer_program,
                    feed={'noise': noise_data,
                          'condition': condition_data},
                    fetch_list=[g_trainer.fake])

                # Discriminator: one pass on reals, one on fakes.
                d_real_loss = exe.run(d_trainer_program,
                                      feed={
                                          'img': real_image,
                                          'condition': condition_data,
                                          'label': real_label
                                      },
                                      fetch_list=[d_trainer.d_loss])[0]
                d_fake_loss = exe.run(d_trainer_program,
                                      feed={
                                          'img': generate_image[0],
                                          'condition': condition_data,
                                          'label': fake_label
                                      },
                                      fetch_list=[d_trainer.d_loss])[0]
                d_loss = d_real_loss + d_fake_loss

                # Generator: possibly several updates per D update.
                for _ in six.moves.xrange(self.cfg.num_generator_time):
                    g_loss = exe.run(g_trainer_program,
                                     feed={
                                         'noise': noise_data,
                                         'condition': condition_data
                                     },
                                     fetch_list=[g_trainer.g_loss])[0]

                batch_time = time.time() - s_time
                if batch_id % self.cfg.print_freq == 0:
                    print(
                        'Epoch ID: {} Batch ID: {} D_loss: {} G_loss: {} Batch_time_cost: {}'.
                        format(epoch_id, batch_id, d_loss[0], g_loss[0],
                               batch_time))
                t_time += batch_time

                if self.cfg.run_test:
                    # Dump a comparison grid of reals and fixed-noise fakes.
                    generate_const_image = exe.run(
                        g_trainer.infer_program,
                        feed={'noise': const_n,
                              'condition': condition_data},
                        fetch_list=[g_trainer.fake])[0]
                    generate_image_reshape = np.reshape(
                        generate_const_image, (self.cfg.batch_size, -1))
                    total_images = np.concatenate(
                        [real_image, generate_image_reshape])
                    fig = utility.plot(total_images)
                    plt.title('Epoch ID={}, Batch ID={}'.format(
                        epoch_id, batch_id))
                    img_name = '{:04d}_{:04d}.png'.format(epoch_id, batch_id)
                    plt.savefig(
                        os.path.join(image_path, img_name),
                        bbox_inches='tight')
                    plt.close(fig)

            # NOTE(review): assumed per-epoch checkpointing (indentation was
            # lost in this copy) — confirm against upstream PaddleCV trainers.
            if self.cfg.save_checkpoints:
                utility.checkpoints(epoch_id, self.cfg, g_trainer, "net_G")
                utility.checkpoints(epoch_id, self.cfg, d_trainer, "net_D")
| apache-2.0 |
woozzu/pylearn2 | pylearn2/scripts/datasets/browse_norb.py | 44 | 15741 | #!/usr/bin/env python
"""
A browser for the NORB and small NORB datasets. Navigate the images by
choosing the values for the label vector. Note that for the 'big' NORB
dataset, you can only set the first 5 label dimensions. You can then cycle
through the 3-12 images that fit those labels.
"""
import sys
import os
import argparse
import numpy
import warnings
try:
import matplotlib
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
matplotlib = None
pyplot = None
from pylearn2.datasets.new_norb import NORB
from pylearn2.utils import safe_zip, serial
def _parse_args():
    """
    Parse command-line arguments, enforcing that the caller supplies
    either --pkl, or both --which_norb and --which_set, but not a mix
    of the two styles. Exits with status 1 on invalid combinations.
    """
    parser = argparse.ArgumentParser(
        description="Browser for NORB dataset.")

    parser.add_argument('--which_norb',
                        type=str,
                        required=False,
                        choices=('big', 'small'),
                        help="'Selects the (big) NORB, or the Small NORB.")

    parser.add_argument('--which_set',
                        type=str,
                        required=False,
                        choices=('train', 'test', 'both'),
                        help="'train', or 'test'")

    parser.add_argument('--pkl',
                        type=str,
                        required=False,
                        help=".pkl file of NORB dataset")

    parser.add_argument('--stereo_viewer',
                        action='store_true',
                        help="Swaps left and right stereo images, so you "
                        "can see them in 3D by crossing your eyes.")

    parser.add_argument('--no_norm',
                        action='store_true',
                        help="Don't normalize pixel values")

    result = parser.parse_args()

    supplied_pkl = result.pkl is not None
    supplied_norb = result.which_norb is not None
    supplied_set = result.which_set is not None

    # Exactly one of the two specification styles must be used.
    if supplied_pkl == (supplied_norb or supplied_set):
        print("Must supply either --pkl, or both --which_norb and "
              "--which_set.")
        sys.exit(1)

    # --which_norb and --which_set come as a pair.
    if supplied_norb != supplied_set:
        print("When not supplying --pkl, you must supply both "
              "--which_norb and --which_set.")
        sys.exit(1)

    if supplied_pkl:
        if not result.pkl.endswith('.pkl'):
            print("--pkl must be a filename that ends in .pkl")
            sys.exit(1)

        if not os.path.isfile(result.pkl):
            print("couldn't find --pkl file '%s'" % result.pkl)
            sys.exit(1)

    return result
def _make_grid_to_short_label(dataset):
    """
    Returns an array x such that x[a][b] gives label index a's b'th unique
    value. In other words, it maps label grid indices a, b to the
    corresponding label value.
    """
    columns = dataset.y[:, :5].transpose()
    unique_values = [sorted(frozenset(column)) for column in columns]

    # If the dataset contains blank images, drop the '-1' sentinel value
    # from every label dimension after 'category', since blanks aren't
    # part of the label grid.
    category_index = dataset.label_name_to_index['category']
    category_to_name = dataset.label_to_value_funcs[category_index]
    has_blanks = any(category_to_name(category) == 'blank'
                     for category in unique_values[category_index])

    if has_blanks:
        for d in range(1, len(unique_values)):
            assert unique_values[d][0] == -1, ("unique_values: %s" %
                                               str(unique_values))
            unique_values[d] = unique_values[d][1:]

    return unique_values
def _get_blank_label(dataset):
    """
    Returns the label vector associated with blank images.

    If dataset is a Small NORB (i.e. it has no blank images), this returns
    None.
    """
    category_index = dataset.label_name_to_index['category']
    category_to_name = dataset.label_to_value_funcs[category_index]
    blank_label = 5

    try:
        blank_name = category_to_name(blank_label)
    except ValueError:
        # No 'blank' category exists (e.g. the small NORB dataset).
        return None

    assert blank_name == 'blank'

    is_blank_row = dataset.y[:, category_index] == blank_label
    blank_rows = dataset.y[is_blank_row, :]

    if not is_blank_row.any():
        return None

    first = blank_rows[0, :]
    if not numpy.all(first == blank_rows[1:, :]):
        raise ValueError("Expected all labels of category 'blank' to have "
                         "the same value, but they differed.")

    return first.copy()
def _make_label_to_row_indices(labels):
    """
    Returns a map from short labels (the first 5 elements of the label
    vector) to the list of row indices of rows in the dense design matrix
    with that label.

    For Small NORB, all unique short labels have exactly one row index.

    For big NORB, a short label can have 0-N row indices.
    """
    result = {}

    for row_index, label in enumerate(labels):
        short_label = tuple(label[:5])
        # setdefault replaces the original get-then-assign, which did two
        # dict lookups per row; one lookup suffices.
        result.setdefault(short_label, []).append(row_index)

    return result
def main():
    """Top-level function: loads the dataset and runs the browser UI.

    Fixes two latent bugs in the event handlers:
    * the image-index clamp used ``len(row_indices)`` (one past the last
      valid index) instead of ``len(row_indices) - 1``;
    * the mouse-motion handler compared the bound method
      ``status_text.get_text`` (not its return value) to a string, so it
      redrew on every motion event.
    """
    args = _parse_args()

    if args.pkl is not None:
        dataset = serial.load(args.pkl)
    else:
        dataset = NORB(args.which_norb, args.which_set)

    # Indexes into the first 5 labels, which live on a 5-D grid.
    grid_indices = [0, ] * 5

    grid_to_short_label = _make_grid_to_short_label(dataset)

    # Maps 5-D label vector to a list of row indices for dataset.X,
    # dataset.y that have those labels.
    label_to_row_indices = _make_label_to_row_indices(dataset.y)

    # Indexes into the row index lists returned by label_to_row_indices.
    object_image_index = [0, ]
    blank_image_index = [0, ]
    blank_label = _get_blank_label(dataset)

    # Index into grid_indices currently being edited.
    grid_dimension = [0, ]

    dataset_is_stereo = 's' in dataset.view_converter.axes
    figure, all_axes = pyplot.subplots(1,
                                       3 if dataset_is_stereo else 2,
                                       squeeze=True,
                                       figsize=(10, 3.5))

    set_name = (os.path.split(args.pkl)[1] if args.which_set is None
                else "%sing set" % args.which_set)
    figure.canvas.set_window_title("NORB dataset (%s)" % set_name)

    # Instructions banner (called for its side effect on the figure;
    # the return value was previously bound to an unused variable).
    figure.suptitle('Up/down arrows choose label, '
                    'left/right arrows change it',
                    x=0.1,
                    horizontalalignment="left")

    # Hides axes' tick marks.
    for axes in all_axes:
        axes.get_xaxis().set_visible(False)
        axes.get_yaxis().set_visible(False)

    text_axes, image_axes = (all_axes[0], all_axes[1:])
    image_captions = (('left', 'right') if dataset_is_stereo
                      else ('mono image', ))

    if args.stereo_viewer:
        # Swap captions so crossing your eyes fuses the pair in 3D.
        image_captions = tuple(reversed(image_captions))

    for image_ax, caption in safe_zip(image_axes, image_captions):
        image_ax.set_title(caption)

    text_axes.set_frame_on(False)  # Hides background of text_axes

    def is_blank(grid_indices):
        """Return True iff grid_indices points at the 'blank' category."""
        assert len(grid_indices) == 5
        assert all(x >= 0 for x in grid_indices)

        ci = dataset.label_name_to_index['category']  # category index
        category = grid_to_short_label[ci][grid_indices[ci]]
        category_name = dataset.label_to_value_funcs[ci](category)
        return category_name == 'blank'

    def get_short_label(grid_indices):
        """
        Returns the first 5 elements of the label vector pointed to by
        grid_indices. We use the first 5, since they're the labels used by
        both the 'big' and Small NORB datasets.
        """
        # Need to special-case the 'blank' category, since it lies outside
        # of the grid.
        if is_blank(grid_indices):  # won't happen with SmallNORB
            return tuple(blank_label[:5])
        else:
            return tuple(grid_to_short_label[i][g]
                         for i, g in enumerate(grid_indices))

    def get_row_indices(grid_indices):
        """Row indices whose labels match grid_indices, or None if none."""
        short_label = get_short_label(grid_indices)
        return label_to_row_indices.get(short_label, None)

    # Maps each image Axes to the pixel array it currently displays, so the
    # mouse-over handler can report pixel values.
    axes_to_pixels = {}

    def redraw(redraw_text, redraw_images):
        """Redraw the label text and/or the image panes, then the canvas."""
        row_indices = get_row_indices(grid_indices)
        if row_indices is None:
            row_index = None
            image_index = 0
            num_images = 0
        else:
            image_index = (blank_image_index
                           if is_blank(grid_indices)
                           else object_image_index)[0]
            row_index = row_indices[image_index]
            num_images = len(row_indices)

        def draw_text():
            if row_indices is None:
                padding_length = dataset.y.shape[1] - len(grid_indices)
                current_label = (tuple(get_short_label(grid_indices)) +
                                 (0, ) * padding_length)
            else:
                current_label = dataset.y[row_index, :]

            label_names = dataset.label_index_to_name
            label_values = [label_to_value(label) for label_to_value, label
                            in safe_zip(dataset.label_to_value_funcs,
                                        current_label)]

            lines = ['%s: %s' % (t, v)
                     for t, v
                     in safe_zip(label_names, label_values)]

            if dataset.y.shape[1] > 5:
                # Inserts image number & blank line between editable and
                # fixed labels.
                lines = (lines[:5] +
                         ['No such image' if num_images == 0
                          else 'image: %d of %d' % (image_index + 1,
                                                    num_images),
                          '\n'] +
                         lines[5:])

            # Prepends the current index's line with an arrow.
            lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]

            text_axes.clear()
            # "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
            text_axes.text(0, 0.5,  # coords
                           '\n'.join(lines),
                           verticalalignment='center',
                           transform=text_axes.transAxes)

        def draw_images():
            if row_indices is None:
                for axis in image_axes:
                    axis.clear()
            else:
                data_row = dataset.X[row_index:row_index + 1, :]

                axes_names = dataset.view_converter.axes
                assert len(axes_names) in (4, 5)
                assert axes_names[0] == 'b'
                assert axes_names[-3] == 0
                assert axes_names[-2] == 1
                assert axes_names[-1] == 'c'

                def draw_image(image, axes):
                    assert len(image.shape) == 2
                    norm = (matplotlib.colors.NoNorm()
                            if args.no_norm else None)
                    axes_to_pixels[axes] = image
                    axes.imshow(image, norm=norm, cmap='gray')

                if 's' in axes_names:
                    image_pair = \
                        dataset.get_topological_view(mat=data_row,
                                                     single_tensor=True)
                    # Shaves off the singleton dimensions
                    # (batch # and channel #), leaving just 's', 0, and 1.
                    image_pair = tuple(image_pair[0, :, :, :, 0])

                    if args.stereo_viewer:
                        image_pair = tuple(reversed(image_pair))

                    for axis, image in safe_zip(image_axes, image_pair):
                        draw_image(image, axis)
                else:
                    image = dataset.get_topological_view(mat=data_row)
                    image = image[0, :, :, 0]
                    draw_image(image, image_axes[0])

        if redraw_text:
            draw_text()

        if redraw_images:
            draw_images()

        figure.canvas.draw()

    default_status_text = ("mouseover image%s for pixel values" %
                           ("" if len(image_axes) == 1 else "s"))
    status_text = figure.text(0.5, 0.1, default_status_text)

    def on_mouse_motion(event):
        """Show the pixel value under the cursor in the status line."""
        original_text = status_text.get_text()

        if event.inaxes not in image_axes:
            status_text.set_text(default_status_text)
        else:
            pixels = axes_to_pixels[event.inaxes]
            row = int(event.ydata + .5)
            col = int(event.xdata + .5)
            status_text.set_text("Pixel value: %g" % pixels[row, col])

        # BUGFIX: was "status_text.get_text != original_text", comparing
        # the bound method itself (always unequal to a string), which
        # forced a full canvas redraw on every motion event.
        if status_text.get_text() != original_text:
            figure.canvas.draw()

    def on_key_press(event):
        """Keyboard navigation: up/down pick a label, left/right change it."""

        def add_mod(arg, step, size):
            # Wrap-around increment; the +size keeps negatives in range.
            return (arg + size + step) % size

        def incr_index_type(step):
            num_dimensions = len(grid_indices)
            if dataset.y.shape[1] > 5:
                # If dataset is big NORB, add one for the image index.
                num_dimensions += 1

            grid_dimension[0] = add_mod(grid_dimension[0],
                                        step,
                                        num_dimensions)

        def incr_index(step):
            assert step in (0, -1, 1), ("Step was %d" % step)

            image_index = (blank_image_index
                           if is_blank(grid_indices)
                           else object_image_index)

            if grid_dimension[0] == 5:  # i.e. the image index
                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # increment the image index
                    image_index[0] = add_mod(image_index[0],
                                             step,
                                             len(row_indices))
            else:
                # increment one of the grid indices
                gd = grid_dimension[0]
                grid_indices[gd] = add_mod(grid_indices[gd],
                                           step,
                                           len(grid_to_short_label[gd]))

                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # Some grid cells have fewer images than others; clamp
                    # to the last valid index. BUGFIX: was
                    # min(image_index[0], len(row_indices)), which could
                    # leave image_index one past the end and crash redraw.
                    image_index[0] = min(image_index[0],
                                         len(row_indices) - 1)

        # Disables left/right key if we're currently showing a blank,
        # and the current index type is neither 'category' (0) nor
        # 'image number' (5)
        disable_left_right = (is_blank(grid_indices) and
                              not (grid_dimension[0] in (0, 5)))

        if event.key == 'up':
            incr_index_type(-1)
            redraw(True, False)
        elif event.key == 'down':
            incr_index_type(1)
            redraw(True, False)
        elif event.key == 'q':
            sys.exit(0)
        elif not disable_left_right:
            if event.key == 'left':
                incr_index(-1)
                redraw(True, True)
            elif event.key == 'right':
                incr_index(1)
                redraw(True, True)

    figure.canvas.mpl_connect('key_press_event', on_key_press)
    figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)

    redraw(True, True)
    pyplot.show()
# Launch the browser when run as a script.
if __name__ == '__main__':
    main()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.