markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
Multiple linear regression
Add toppings to our model of pizza price prediction
|
# Multiple linear regression for pizza price from diameter and topping count.
from numpy.linalg import inv
from numpy import dot, transpose
# Design matrix rows are [1, diameter, n_toppings]; the leading 1 is the intercept term
X = [[1, 6, 2], [1, 8, 1], [1, 10, 0], [1, 14, 2], [1, 18, 0]]
X
y = [[7], [9], [13], [17.5], [18]]
#Solve using linear algebra: normal equations, beta = (X'X)^-1 X'y
dot(inv(dot(transpose(X),X)), dot(transpose(X),y))
#Solve using numpy least squares procedure
from numpy.linalg import lstsq
lstsq(X,y)[0]
#Compare simple vs multinomial
# Refit without the explicit intercept column; LinearRegression adds its own
# intercept (LinearRegression is presumably imported in an earlier cell — confirm)
X = [[6, 2], [8, 1], [10, 0], [14, 2], [18, 0]]
Y = [[7], [9], [13], [17.5], [18]]
model = LinearRegression()
model.fit(X,Y)
X_test = [[8, 2], [9, 0], [11, 2], [16, 2], [12, 0]]
Y_test = [[11], [8.5], [15], [18], [11]]
predictions = model.predict(X_test)
for i, prediction in enumerate(predictions):
    print("Predicted: %s, Target: %s" %(prediction, Y_test[i]))
print("R square:", model.score(X_test, Y_test))
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Polynomial regression
Use PolynomialFeatures to transform the data
|
# Compare a straight-line fit with a quadratic (degree-2 polynomial) fit
# on the pizza price data; both curves are drawn on the same axes.
from sklearn.preprocessing import PolynomialFeatures
X_train = [[6], [8], [10], [14], [18]]
y_train = [[7], [9], [13], [17.5], [18]]
X_test = [[6], [8], [11], [16]]
y_test = [[8], [12], [15], [18]]
# Baseline: simple linear regression on diameter alone
regressor = LinearRegression()
regressor.fit(X_train, y_train)
xx = np.linspace(0, 26, 100)
xx
yy = regressor.predict(xx.reshape(xx.shape[0],1))
plt.plot(xx,yy)
# Map diameter x to features [1, x, x^2] and refit a linear model on them
quadratic_featureziser = PolynomialFeatures(degree=2)
X_train_quadratic = quadratic_featureziser.fit_transform(X_train)
X_train_quadratic
X_test_quadratic = quadratic_featureziser.transform(X_test)
regressor_quadratic = LinearRegression()
regressor_quadratic.fit(X_train_quadratic,y_train)
xx_quadratic = quadratic_featureziser.transform(xx.reshape(xx.shape[0],1))
plt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r',linestyle = '--')
plt.title("Pizza price regressed on diameter")
plt.xlabel("Diameter in inches")
plt.ylabel("Price in dollars")
plt.axis([0,25,0,25])
plt.grid(True)
plt.scatter(X_train, y_train)
plt.show()
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Apply linear regression on Wine dataset from UCI
|
# Download the UCI red-wine quality data (semicolon-separated CSV) and
# eyeball the alcohol vs. quality relationship.
import pandas as pd
target_url = ("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv")
df = pd.read_csv(target_url,header=0, sep=";")
df.head()
df.describe()
plt.scatter(df['alcohol'], df['quality'])
plt.xlabel("Alcohol")
plt.ylabel("Quality")
plt.title("Alcohol against Quality")
plt.show()
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Fit and evaluate the model
|
# Fit a linear regression on the wine data and evaluate it on a held-out split.
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
#Split into feature and target, train and test
# Features: every column except the last; target: the 'quality' column
X = df[list(df.columns)[:-1]]
y = df['quality']
X.head()
y.tail()
# Random train/test split (default 75/25)
X_train, X_test, y_train, y_test = train_test_split(X, y)
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_predictions = regressor.predict(X_test)
#Check R squared (coefficient of determination on the test set)
print("R squared is: ", regressor.score(X_test, y_test))
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Cross validation
|
#Make cross validation
# FIX: cross_val_score moved to sklearn.model_selection
# (sklearn.cross_validation was removed in scikit-learn 0.20).
from sklearn.model_selection import cross_val_score
# 5-fold cross-validated R^2 scores for the fitted regressor
scores = cross_val_score(regressor, X, y, cv = 5)
print(scores.mean(), scores)
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Fitting using gradient descent
|
# Fit a linear model to the Boston housing data with stochastic gradient descent.
from sklearn.datasets import load_boston
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import StandardScaler
data = load_boston()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target)
# Standardize features and target (SGD is sensitive to feature scale).
# FIX: StandardScaler expects 2-D input, so the 1-D targets are reshaped
# to a column and flattened back afterwards.
X_scaler = StandardScaler()
y_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1, 1)).ravel()
X_test = X_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1, 1)).ravel()
# NOTE: newer scikit-learn spells this loss 'squared_error'
regressor = SGDRegressor(loss='squared_loss')
scores = cross_val_score(regressor, X_train, y_train, cv=5)
print('Cross validation r-squared scores:', scores)
print('Average cross validation r-squared score:', np.mean(scores))
# FIX: SGDRegressor is not a transformer; use fit(), not fit_transform()
regressor.fit(X_train, y_train)
print('Test set r-squared score', regressor.score(X_test, y_test))
|
Chapters/Two/Simple Linear Regression.ipynb
|
fadeetch/Mastering-ML-Python
|
mit
|
Load data (in LiPD format)
|
# Load the LiPD archive and extract the d18O and depth columns.
# loadLipds/getCsv presumably come from the lipd package, imported in an
# earlier cell — confirm.
loadLipds()
f1 = getCsv("Crystal2013.lpd")
d18O = f1["Crystal2013.paleo1measurement1.csv"]["d18O_VPDB"]
d18O = np.asarray(d18O)
depth = f1["Crystal2013.paleo1measurement1.csv"]["Depth"]
depth = np.asarray(depth)
depth = depth /10.0 #depth in cm (original column is in mm)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Load chronology information
|
# U/Th chronology: corrected ages, their depths, and age uncertainties.
# A synthetic top-of-core point is inserted at index 0 of each array so the
# model is anchored at the surface.
age_c = f1["Crystal2013.chron1measurement1.csv"]["230Th Age (yr BP) (corrected)"]
age_c = np.asarray(age_c)
age_c = np.insert(age_c,0,-57) #insert the top age
depth_age = f1["Crystal2013.chron1measurement1.csv"]["Depth (mm)"]
depth_age = np.asarray(depth_age)
depth_age = np.insert(depth_age,0,0.05)
age_sd = f1["Crystal2013.chron1measurement1.csv"]["230Th Age_uncertaity (yr BP) (corrected)"]
age_sd = np.asarray(age_sd)
age_sd = np.insert(age_sd,0,0.1)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
plot raw age model
|
# plot age model
plt.rc('text', usetex=True,)
plt.rc('font', family='sans-serif')
# create figure with two stacked panels
f, (ax1, ax2) = plt.subplots(2, 1)
f.set_figheight(9); f.set_figwidth(8)
# Top panel: raw age-depth relationship with error bars
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.errorbar(depth_age,age_c,age_sd)
ax1.set_xlabel(r'Depth (mm)')
ax1.set_ylabel(r'Age (years since 2009, 2$\sigma$ errors)')
ax1.set_title(r'Crystal Raw age model, 10 dates')
plt.subplots_adjust(hspace = 0.4)
# Bottom panel: same arrays replotted; only color and title differ here
ax2.spines["top"].set_visible(False)
ax2.spines["right"].set_visible(False)
ax2.errorbar(depth_age,age_c,age_sd,color='Crimson')
ax2.set_xlabel(r'Depth (mm)')
ax2.set_ylabel(r'Age (years since 2009, 2$\sigma$ errors)')
ax2.set_title(r'Age model respecting the principle of superposition (10 dates)')
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Preparation for the age modelling
|
# convert to ages BP and wrap inputs as R vectors for Bchron (via rpy2)
year_BP = age_c
year_CE = 1950 - year_BP  # years BP count backwards from 1950 CE
top_date = 2007  # collection year of the core top
year_top = year_BP + top_date - 1950  # ages relative to the collection year
nyears = int(year_CE.max()-year_CE.min())+1
positions = FloatVector(depth_age/10.0)# position in core in cm
ages = FloatVector(year_BP)# age estimate
ages_CE = FloatVector(year_top)# age estimate
sd = FloatVector(age_sd/2)# SD of ages; halved — presumably quoted errors are 2-sigma, confirm
nd = age_c.shape[0] # number of dates
calCurves = StrVector(['normal' for i in range(nd)] )
#predictPositions = r.seq(0,d.max(),by=d.max()/nyears)
predictPositions = FloatVector(depth)  # predict ages at every sample depth
# Specify extractDate (top-most age of the core, in years BP)
topDate= 1950 - top_date
#topDate = 0
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Generate different age-depth realizations by using Bchron age model
|
# Run the Bchron age-depth model from R (via rpy2): 10,000 MCMC iterations,
# 2,000 burn-in, thinning of 8, predicting ages at every sample depth.
Bchron=importr('Bchron')
haha = Bchron.Bchronology(ages=ages, ageSds=sd, positions=positions, iterations=10000, burn=2000, thin=8, extractDate=topDate, calCurves=calCurves, predictPositions=predictPositions)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Retrieve the result of estimated ages
|
# The variable 'theta' is the posterior estimated values of the ages, and is stored as
# the first matrix of the List Vector class 'ages'
theta = haha[0]
theta=np.array(theta)
# thetaPredict holds the posterior ages at the requested predictPositions
thetaPredict = haha[4]
thetaPredict=np.array(thetaPredict)
# Save depth horizons
depths=np.array(predictPositions)
depth_horizons=depths[:-1]
# save all the chronologies as independent time axes and plot original data with
# varying X values.
thetaPredict.shape
chrons=thetaPredict[:,:-1] # reshape to be same size as input data
# Call BCHRON observation model to return CHRONS (in years BP)
# NOTE: Bchron will FREAK OUT unless ages increase with depth (no reversals)
chronBP = chrons
# recast in years CE
chronCE = np.flipud(1950 - chronBP).transpose()
# 2.5% / 50% / 97.5% quantiles across the ensemble of chronologies
chronQ = mquantiles(chronCE, prob=[0.025, 0.5, 0.975], axis=1)
nchrons = chronCE.shape[1]
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Plot the age model
|
# PLOT IT OUT: ensemble envelope, median chronology, dated points, and a few
# random sample paths from the Bchron posterior.
x = depth_horizons*10  # back to mm for plotting
f2 = plt.figure(figsize=(10,8))
ax = plt.axes()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.fill_between(x, chronQ[:,0], chronQ[:,2], facecolor='Silver',edgecolor='Silver',lw=0.0) # see http://stackoverflow.com/questions/14143092/why-does-matplotlib-fill-between-draw-edgelines-only-on-a-pdf
CI = mpatches.Patch(color='silver') # create proxy artist for labeling
lbl = ('95\% CI','median','U/Th dates','sample paths')
dat = plt.errorbar(depth_age,year_CE,age_sd,color='Crimson',fmt='o')
med, = plt.plot(x,chronQ[:,1],color = 'black', lw=3.0) # that comma is really important !! wouldn't work without it
# plot a few random paths
nl = 10
# FIX: randint's upper bound is exclusive, so the original randint(nchrons+1)
# could draw index nchrons and raise IndexError on chronCE (valid columns
# are 0..nchrons-1).
idx = np.random.randint(nchrons, size=nl)
l = plt.plot(x,chronCE[:,idx],lw=0.5,color = 'Crimson')
#col = plt.cm.YlGn(np.arange(nl))
#for i in range(nl):
#    plt.plot(x,chronCE[:,idx[i]],lw=1,color=col[i,:])
#    plt.draw()
# I do not understand why line colors do not get updated. this is most irritating
lg = plt.legend((CI,med,dat,l[1]),lbl,loc='upper right'); lg.draw_frame(False)
plt.grid(axis='y'); plt.ylim(750,2020); plt.xlim(0,120)
plt.xlabel(r'Depth (mm)',fontsize=14); plt.ylabel(r'Year (CE)',fontsize=14);
plt.title(r'Crystal BChron sampling paths, 10 dates',fontsize=16)
#f2.savefig('Crystal_BChron.pdf', dpi=400, facecolor='w', edgecolor='w',transparent=True)
# save to file
np.savez('Crystal_Bchron.npz',chronCE=chronCE,depth_horizons=depth_horizons)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
plot timeseries quantiles
|
# Plot the proxy series against the ensemble age quantiles.
f, ax = plt.subplots(1, 1)
f.set_figheight(6); f.set_figwidth(8)
chronQ = mquantiles(chronCE, prob=[0.025, 0.5, 0.975], axis=1)
Xr = d18O[:-1]  # trim to match the chronology length
hp,= plt.plot(chronQ[:,0], Xr, lw=0.5, color='silver',ls='dashed')
hl,= plt.plot(chronQ[:,2], Xr, lw=0.5, color='silver')
hm,= plt.plot(chronQ[:,1], Xr, lw=1.0, color='DarkBlue')
plt.xlim(750,2020); plt.ylim(-7.5,-11); plt.grid(axis='y')  # y-axis inverted (d18O convention)
# FIX: labels were swapped relative to the handles — hp is the 2.5% quantile
# (chronQ[:,0]) and hl is the 97.5% quantile (chronQ[:,2]).
lbl = ('2.5\% quantile','median','97.5\% quantile')
lg = plt.legend((hp,hm,hl),lbl,loc='upper right',fontsize=9); lg.draw_frame(False)
# label axes
plt.ylabel(r'$\delta^{18}\mathrm{O}$',fontsize=12)
plt.xlabel(r'Year (CE)',fontsize=12)
plt.title(r'Crystal $\delta^{18}\mathrm{O}$ Bchron ensemble',fontsize=14)
#f.savefig('Crystal_ts.pdf', dpi=400, facecolor='w', edgecolor='w',transparent=True)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Now we are going to correlate speleothem $\delta^{18}O$ with sea surface temperature
First read monthly sea surface temperature from a netCDF file
|
#read Kaplan SST anomalies from a local netCDF file
nc = xray.open_dataset('/Users/hujun/Downloads/sst.mon.anom.nc')
lat0 = nc['lat']
lon0 = nc['lon']
sst = nc['sst']
nlat=lat0.shape[0]
nlon=lon0.shape[0]
# One monthly slice per year starting at index 19; these slots are
# overwritten with true annual means in a later cell
sst_ann=sst[19:1831:12,:,:]
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Prepare functions for filtering
|
def butter_lowpass(cutoff, fs, order=3):
    """Design a digital low-pass Butterworth filter.

    Parameters
    ----------
    cutoff : float
        Cutoff frequency, in the same units as ``fs``.
    fs : float
        Sampling frequency.
    order : int, optional
        Filter order (default 3).

    Returns
    -------
    (b, a) : numerator and denominator polynomial coefficients.
    """
    nyquist = 0.5 * fs
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def filter(x, cutoff, axis, fs=1.0, order=3):
    """Zero-phase low-pass filter ``x`` along ``axis`` using filtfilt.

    NOTE: shadows the builtin ``filter``; name kept for compatibility
    with existing callers in this notebook.
    """
    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, x, axis=axis)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Get the annual mean SST from 1857-2007
|
#annual mean
# Average each block of 12 monthly fields (offset 19 aligns blocks to the
# yearly slices selected earlier)
for i in range(sst_ann.shape[0]):
    sst_ann[i,:,:]=np.mean(sst[i*12+19:(i+1)*12+19,:,:],axis=0)
#sst_ann_fil=filter(sst_ann,1.0/30,0)
sst_ann_fil=sst_ann  # low-pass filtering disabled; use raw annual means
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Read age model results
|
# Reload the saved Bchron ensemble and recompute its quantiles
crys = np.load('Crystal_Bchron.npz')
chronCE = crys['chronCE']
depth = crys['depth_horizons']
chronQ = mquantiles(chronCE, prob=[0.025, 0.5, 0.975], axis=1)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Prepare the function for deleting repeated calendar year in the age model result
|
def coarse_grain(t, X):
    """Collapse duplicate time values by averaging their observations.

    Parameters
    ----------
    t : ndarray
        Time axis, possibly containing repeated values.
    X : ndarray
        Observations aligned with ``t``.

    Returns
    -------
    (tu, Xu) : unique (sorted) times and the mean of ``X`` over each one.
    """
    tu = np.unique(t)
    Xu = np.array([np.mean(X[t == u]) for u in tu])
    return (tu, Xu)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Interpolate $\delta^{18}O$ on the annual scale
|
#interpolation of each age-model realization onto annual steps 1857-2007
d18O_multi=np.zeros((chronCE.shape[1],sst_ann_fil.shape[0]))
sst_corr=np.zeros((chronCE.shape[1],nlat,nlon))
for it in range(chronCE.shape[1]):
    # average out repeated years, then fit a smoothing spline
    tu, Xu = coarse_grain(chronCE[:,it],Xr)
    #interpolation
    tck = interpolate.splrep(tu,Xu,s=1)
    xnew = range(1857,2008,1)
    ynew = interpolate.splev(xnew,tck)
    d18O_multi[it,:] = ynew
#interpolation of the median series
tu, Xu = coarse_grain(chronQ[:,1],Xr)
tck = interpolate.splrep(tu,Xu,s=1)
xnew = range(1857,2008,1)
ynew = interpolate.splev(xnew,tck)
d18O_median = ynew
# filtering disabled; aliases kept for downstream cells
d18O_fil=d18O_multi
d18O_median_fil=d18O_median
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Calculate correlations between the each realization of $\delta^{18}O$ and SST in each grid cell
|
#correlation of each d18O realization with SST in every grid cell
#sst_ann_new=sst_ann_fil.transpose(1,2,0)
sst_ann_new=sst_ann_fil.transpose("lat","lon","time")  # xarray: reorder dims by name
d18O_new=np.transpose(d18O_fil)
# remove means so the dot products below give sums of cross-products
sst_ano=np.ma.anomalies(sst_ann_new,axis=2)
d18O_ano=np.ma.anomalies(d18O_new,axis=0)
nomi=np.dot(sst_ano,d18O_ano)  # correlation numerator, per grid cell x realization
sst_sd=np.sum(sst_ano**2,axis=2)
d18O_sd=np.sum(d18O_ano**2,axis=0)
d18O_median_ano=np.ma.anomalies(d18O_median)
d18O_median_sd=np.sum(d18O_median_ano**2,axis=0)
nomi_median=np.dot(sst_ano,d18O_median_ano)
# Pearson correlation: numerator / sqrt(product of the sums of squares)
corr_sst=nomi/np.sqrt(np.dot(sst_sd[:,:,None],d18O_sd[None,:]))
corr_sst_median=nomi_median/np.sqrt(np.dot(sst_sd[:,:,None],d18O_median_sd[None]))
corr_sst_new=np.reshape(corr_sst,(nlat*nlon,1000))  # assumes 1000 realizations — TODO confirm
corrQ = mquantiles(corr_sst_new, prob=[0.025, 0.5, 0.975], axis=1)
corrQ_new = np.reshape(corrQ,(nlat,nlon,3))
#Interquatile Range — width of the 2.5%-97.5% interval (a 95% range)
IQR=corrQ_new[:,:,2]-corrQ_new[:,:,0]
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Test the significance of the calculated correlations (replace Nsample with your raw sample size)
|
#t-test for correlation, applied to each quantile of the correlation field
corrQ975= corrQ_new[:,:,2]
corrQ025= corrQ_new[:,:,0]
corrQ050= corrQ_new[:,:,1]
# lag-1 autocorrelation of the proxy series (Yule-Walker AR(1) estimate)
d18O_coef, d18O_sigma = alg.AR_est_YW(d18O_median_ano,1)
neff_array=sst_ano[:,:,0]
latt,lont=[],[]
pval_025=[]
pval_975=[]
pval_med=[]
Nsample = 151  # raw sample size; replace with your own series length
for ilat in range(nlat):
    for ilon in range(nlon):
        if np.isnan(sst_ano[ilat,ilon,0])==False:  # skip land/missing cells
            coef, sigma = alg.AR_est_YW(sst_ano[ilat,ilon,:],1)
            # sst_coef[ilat,ilon] = coef
            # effective sample size, reduced by serial correlation in both series
            neff=Nsample*(1-d18O_coef*coef)/(1+d18O_coef*coef)
            # neff=151
            latt.append(lat0[ilat])
            lont.append(lon0[ilon])
            # two-sided t-test with neff-2 degrees of freedom, per quantile
            tval=corrQ975[ilat,ilon]/np.sqrt(1-corrQ975[ilat,ilon]**2)*np.sqrt(neff-2)
            pval0=t.sf(abs(tval),neff-2)*2
            pval_975.append(pval0)
            tval=corrQ025[ilat,ilon]/np.sqrt(1-corrQ025[ilat,ilon]**2)*np.sqrt(neff-2)
            pval0=t.sf(abs(tval),neff-2)*2
            pval_025.append(pval0)
            tval=corrQ050[ilat,ilon]/np.sqrt(1-corrQ050[ilat,ilon]**2)*np.sqrt(neff-2)
            pval0=t.sf(abs(tval),neff-2)*2
            pval_med.append(pval0)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Check and plot p-values in the FDR procedure
|
#check i/m vs. p-values
# Benjamini-Hochberg-style diagnostic: sorted p-values against the i/m line
indexm = np.arange(1,len(pval_975)+1,1)
im = 1.0*indexm / len(pval_975)
thres = 0.05*im
pval_975_s = sorted(pval_975)
#pval_025_s = sorted(pval_025)
smaller=[]
small_index=[]
larger=[]
large_index=[]
n=0
# split p-values at the nominal 0.05 level for color coding
for pp in pval_975_s:
#for pp in pval_025_s:
    if pp <=0.05:
        smaller.append(pp)
        small_index.append(im[n])
    else:
        larger.append(pp)
        large_index.append(im[n])
    n=n+1
plt.plot(im,pval_975_s,'kx',markersize=1.5)
#plt.plot(im,pval_025_s,'kx',markersize=1.5)
plt.plot(im,thres)
plt.plot(small_index,smaller,'bx',markersize=1.5)
plt.plot(large_index,larger,'kx',markersize=1.5)
plt.axhline(y=0.05,linestyle='dashed')
plt.xlabel('index/m',fontsize=14)
plt.ylabel('p-value',fontsize=14)
plt.tick_params(labelsize=14)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
FDR procedure
|
#start FDR procedure (delegated to an R implementation via rpy2)
pvalr_975 = FloatVector(pval_975)
pvalr_025 = FloatVector(pval_025)
pvalr_med = FloatVector(pval_med)
r.source("fdr.R")  # defines r.fdr; semantics live in the R script — see that file
#sig_975 = r.fdr(pvalr_975,method="original",adjustment_method="mean")
#sig_025 = r.fdr(pvalr_025,method="original",adjustment_method="mean")
#sig_med = r.fdr(pvalr_med,method="original",adjustment_method="mean")
sig_975 = r.fdr(pvalr_975,method="original")
sig_025 = r.fdr(pvalr_025,method="original")
sig_med = r.fdr(pvalr_med,method="original")
#print(sig_975)
#prepare all global grids
latmedian=latt[:]
lonmedian=lont[:]
lat975=latt[:]
lon975=lont[:]
lat025=latt[:]
lon025=lont[:]
#delete grids which are significant
# NOTE(review): indices from R are treated as 1-based (isig-1); deleting in
# reverse order keeps the remaining indices valid
if sig_975:
    for isig in sorted(sig_975,reverse=True):
        del lat975[isig-1]
        del lon975[isig-1]
#        lat975.append(latt[isig-1])
#        lon975.append(lont[isig-1])
if sig_025:
    for isig in sorted(sig_025,reverse=True):
        del lat025[isig-1]
        del lon025[isig-1]
if sig_med:
    for isig in sorted(sig_med,reverse=True):
        del latmedian[isig-1]
        del lonmedian[isig-1]
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Plot figures of the distributions of correlations (median, IQR, 2.5% quantile, 97.5% quantile)
|
#plot figures: Robinson-projection maps of the correlation field; grid cells
# remaining after the FDR deletions are overplotted as black dots
map = Basemap(projection='robin',resolution='l',lat_0=0,lon_0=180)  # NOTE: shadows builtin map()
#map.drawcoastlines()
#map.fillcontinents(color='gray')
#map.drawmapboundary()
#map.drawmeridians(np.arange(0,360,30))
#map.drawparallels(np.arange(-90,90,30))
lons, lats = np.meshgrid(lon0, lat0)
x,y=map(lons,lats)  # project lon/lat to map coordinates
fig=plt.figure(figsize=(10,8))
# panel (a): ensemble-median correlation
ax1=fig.add_subplot(221)
#map.drawcoastlines(linewidth=1)
map.fillcontinents(color='gray')
#map.drawmapboundary()
map.drawmeridians(np.arange(0,360,30),color='DimGray')
map.drawparallels(np.arange(-90,90,30),color='DimGray')
clevs=np.linspace(-1,1,21)
cs=map.contourf(x,y,corrQ_new[:,:,1],clevs,cmap=plt.cm.RdBu_r)
cbar = map.colorbar(cs,location='bottom')
cbar.ax.tick_params(labelsize=14)
x2,y2=map(lonmedian,latmedian)
passt=map.plot(x2,y2,'ko',markersize=1.5)
ax1.set_title("(a) median",fontsize=14)
# panel (b): spread of the correlation across the ensemble
ax2=fig.add_subplot(222)
#map.drawcoastlines(linewidth=1)
map.fillcontinents(color='gray')
#map.drawmapboundary()
map.drawmeridians(np.arange(0,360,30),color='DimGray')
map.drawparallels(np.arange(-90,90,30),color='DimGray')
#clevs=np.linspace(0.8,1.5,10)
cs=map.contourf(x,y,IQR,cmap=plt.cm.OrRd)
cbar = map.colorbar(cs,location='bottom')
cbar.ax.tick_params(labelsize=14)
ax2.set_title("(b) IQR",fontsize=14)
# panel (c/e): 2.5% quantile with FDR-screened points
ax3=fig.add_subplot(223)
#map.drawcoastlines(linewidth=1)
map.fillcontinents(color='gray')
#map.drawmapboundary()
map.drawmeridians(np.arange(0,360,30),color='DimGray')
map.drawparallels(np.arange(-90,90,30),color='DimGray')
clevs=[-1,-0.9,-0.8,-0.7,-0.6,-0.5,-0.4,-0.3,-0.2,-0.1,0]
cs=map.contourf(x,y,corrQ_new[:,:,0],clevs,cmap=plt.cm.GnBu_r)
cbar = map.colorbar(cs,location='bottom')
cbar.ax.tick_params(labelsize=14)
x2,y2=map(lon025,lat025)
passt=map.plot(x2,y2,'ko',markersize=1.5)
ax3.set_title("(e) 2.5% quantile (FDR)",fontsize=14)
# panel (d/f): 97.5% quantile with FDR-screened points
ax4=fig.add_subplot(224)
map = Basemap(projection='robin',resolution='l',lat_0=0,lon_0=180)
#map.drawcoastlines()
map.fillcontinents(color='gray')
#map.drawmapboundary()
map.drawmeridians(np.arange(0,360,30),color='DimGray')
map.drawparallels(np.arange(-90,90,30),color='DimGray')
clevs=[0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
cs=map.contourf(x,y,corrQ_new[:,:,2],clevs,cmap=plt.cm.OrRd)
cbar = map.colorbar(cs,location='bottom')
cbar.ax.tick_params(labelsize=14)
x2,y2=map(lon975,lat975)
passt=map.plot(x2,y2,'ko',markersize=1.5)
ax4.set_title("(f) 97.5% quantile (FDR)",fontsize=14)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Plot the spectra by using the MTM method
First load age model results
|
#d18O from McCabe paper
data_mc = genfromtxt('data/McCabe_crystal.txt', delimiter=',')
d18O_mc = data_mc[0:,2]  # third column holds the d18O values
d18O_mc_rm = d18O_mc-np.mean(d18O_mc)  # demean before spectral analysis
# load Bchron data saved by the age-model notebook
crys = np.load('Crystal_Bchron.npz')
chronCE = crys['chronCE']
depth = crys['depth_horizons']
chronQ = mquantiles(chronCE, prob=[0.025, 0.5, 0.975], axis=1)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Calculate the spectra of the median $\delta^{18}O$ from Bchron results
|
def spline_interp(tu, Xu, tstart, tend):
    """Fit a smoothing spline to (tu, Xu), sample it at integer steps in
    [tstart, tend), and return the sampled series with its mean removed.

    Parameters
    ----------
    tu, Xu : array-like
        Time axis (strictly increasing) and observations.
    tstart, tend : int
        Half-open range of integer sample points.
    """
    spline = interpolate.splrep(tu, Xu, s=1)
    sample_points = range(tstart, tend, 1)
    sampled = interpolate.splev(sample_points, spline)
    return sampled - np.mean(sampled)
#MTM spectrum for the median d18O from Bchron
tu_bchron, Xu_bchron = coarse_grain(chronQ[:,1],Xr)
ynew_bchron = spline_interp(tu_bchron,Xu_bchron,1000,2008)
f_m, psd_mt_m, nu_m = alg.multi_taper_psd(ynew_bchron, Fs=1.0, adaptive=False,jackknife=False)
#MTM spectrum for the d18O from McCabe paper
f_mc, psd_mt_mc, nu_mc = alg.multi_taper_psd(d18O_mc_rm,Fs=1.0,adaptive=False,jackknife=False)
#AR1 simulation (1000 ensembles) — one surrogate spectrum per realization
pmtm_ar=np.zeros((len(f_m),chronCE.shape[1]))
#Bchron ensembles MTM spectrum — one spectrum per realization
psd_multi=np.zeros((len(f_m),chronCE.shape[1]))
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Calculate all possible spectras from Bchron results
|
# MTM spectrum for every Bchron realization, plus a matched AR(1) surrogate
for it in range(chronCE.shape[1]):
    tu, Xu = coarse_grain(chronCE[:,it],Xr)
    ynew_rm = spline_interp(tu,Xu,1000,2008)
    f, psd_mt, nu = alg.multi_taper_psd(ynew_rm,Fs=1.0,adaptive=False,jackknife=False)
    psd_multi[:,it]=psd_mt
    # AR(1) surrogate with the same lag-1 coefficient and innovation variance
    coef,sigma=alg.AR_est_YW(ynew_rm,1)
    X_ar, noise, aph = utils.ar_generator(len(ynew_rm), sigma, coef)
    f_ar, psd_mt_ar, nu_m = alg.multi_taper_psd(X_ar ,Fs=1.0,adaptive=False,jackknife=False)
    pmtm_ar[:,it]=psd_mt_ar
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Plot the spectra
|
# Quantiles of the AR(1) surrogate spectra and of the Bchron-ensemble spectra
pq = mquantiles(pmtm_ar, prob=[0.025, 0.5, 0.975], axis=1)
psdQ = mquantiles(psd_multi, prob=[0.025, 0.5, 0.975], axis=1)
#plot the figure
# x-axis is labeled by period (years) at these tick positions
pertick=np.array([200, 100 ,50, 20,10,8, 6,4, 2])
pertick_labels=(['200', '100' ,'50', '20','10','8' ,'6','4', '2'])
xtick=(1.0/pertick)
plt.loglog(f_m, psd_mt_m,label=r'$\delta^{18}\mathrm{O}$, median from Bchron', color='DodgerBlue',linewidth=1)
plt.loglog(f_ar, pq[:,2], label='AR(1), 97.5% quantile',color='red',linewidth=0.8)
plt.loglog(f_mc, psd_mt_mc,label=r'$\delta^{18}\mathrm{O}$ from McCabe et al. (2013)', color='green',linewidth=1)
plt.fill_between(f,psdQ[:,0],psdQ[:,2],label='Bchron 95% CI',facecolor='lightgrey',color='lightgrey',alpha=0.5)
plt.xlabel('Frequency (Years)',fontsize=12)
plt.ylabel('PSD',fontsize=14)
plt.xlim([1./200.,0.5])
plt.ylim([1e-4, 1e2])
plt.xticks(xtick, pertick_labels, size='small')
plt.tick_params(axis="both", which="both", bottom="on", top="off",
labelbottom="on", left="on", right="off", labelleft="on",direction="out",labelsize=12)
plt.minorticks_off()
plt.grid('on',axis='x',color='DimGray')
plt.legend(loc=3,fontsize=12,frameon=False)
|
CrystalCave.ipynb
|
ClimateTools/Correlation_EPSL
|
mit
|
Adding additional latent variables to the likelihood
The standard GPyTorch variational objects will take care of inferring the latent functions $f_1 \ldots f_k$. However, we do need to add the additional latent variables $z_i$ to the models. We will do so by creating a custom likelihood that models:
$$
\sum_{z_i=1}^k p(\mathbf y_i \mid \mathbf f (\mathbf x_i), z_i) \: p(z_i)
$$
GPyTorch's likelihoods are capable of modeling additional latent variables. Our custom likelihood needs to define the following three functions:
pyro_model (needs to call through to super().pyro_model at the end), which defines the prior distribution for additional latent variables
pyro_guide (needs to call through to super().pyro_guide at the end), which defines the variational (guide) distribution for additional latent variables
forward, which defines the observation distributions conditioned on \mathbf f (\mathbf x_i) and any additional latent variables.
The pyro_model function
For each task, we will model the cluster assignment with a OneHotCategorical variable, where each cluster has equal probability. The pyro_model function will make a pyro.sample call to this prior distribution and then call the super method:
```python
# self.prior_cluster_logits = torch.zeros(num_tasks, num_clusters)
def pyro_model(self, function_dist, target):
cluster_assignment_samples = pyro.sample(
self.name_prefix + ".cluster_logits", # self.name_prefix is added by PyroGP
pyro.distributions.OneHotCategorical(logits=self.prior_cluster_logits).to_event(1)
)
return super().pyro_model(
function_dist,
target,
cluster_assignment_samples=cluster_assignment_samples
)
```
Note that we are adding an additional argument cluster_assignment_samples to the super().pyro_model call. This will pass the cluster assignment samples to the forward call, which is necessary for inference.
The pyro_guide function
For each task, the variational (guide) distribution will also be a OneHotCategorical variable, which will be defined by the parameter self.variational_cluster_logits. The pyro_guide function will make a pyro.sample call to this prior distribution and then call the super method:
python
def pyro_guide(self, function_dist, target):
pyro.sample(
self.name_prefix + ".cluster_logits", # self.name_prefix is added by PyroGP
pyro.distributions.OneHotCategorical(logits=self.variational_cluster_logits).to_event(1)
)
return super().pyro_guide(function_dist, target)
Note that we are adding an additional argument cluster_assignment_samples to the super().pyro_model call. This will pass the cluster assignment samples to the forward call, which is necessary for inference.
The forward function
The pyro_model function passes the additional keyword argument cluster_assignment_samples to the forward call. Therefore, our forward method will define the conditional probability $p(\mathbf y_i \mid \mathbf f(\mathbf x), z_i)$, where $\mathbf f(\mathbf x)$ corresponds to the variable function_samples and $z_i$ corresponds to the variable cluster_assignment_samples.
In our example $p(\mathbf y_i \mid \mathbf f(\mathbf x), z_i)$ corresponds to a Gaussian noise model.
```python
# self.raw_noise is the Gaussian noise parameter
# function_samples is `n x k`; cluster_assignment_samples is `k x t`, where `t` is the number of tasks
def forward(self, function_samples, cluster_assignment_samples):
return pyro.distributions.Normal(
loc=(function_samples.unsqueeze(-2) * cluster_assignment_samples).sum(-1),
scale=torch.nn.functional.softplus(self.raw_noise).sqrt()
).to_event(1)
# The to_event call is necessary because we are returning a multitask distribution,
# where each task dimension corresponds to each of the `t` tasks
```
This is all we need for inference! However, if we want to use this model to make predictions, the cluster_assignment_samples keyword argument will not be passed into the function. Therefore, we need to make sure that forward can handle both inference and predictions:
```python
def forward(self, function_samples, cluster_assignment_samples=None):
if cluster_assignment_samples is None:
# We'll get here at prediction time
# We'll use the variational distribution when making predictions
cluster_assignment_samples = pyro.sample(
self.name_prefix + ".cluster_logits", self._cluster_dist(self.variational_cluster_logits)
)
return pyro.distributions.Normal(
loc=(function_samples.unsqueeze(-2) * cluster_assignment_samples).sum(-1),
scale=torch.nn.functional.softplus(self.raw_noise).sqrt()
).to_event(1)
```
|
class ClusterGaussianLikelihood(gpytorch.likelihoods.Likelihood):
    """Gaussian likelihood with a latent cluster assignment per task.

    Each of the ``num_tasks`` outputs is assigned to one of ``num_clusters``
    latent functions via a OneHotCategorical variable; observations are
    Gaussian around the assigned function's values.
    """

    def __init__(self, num_tasks, num_clusters):
        super().__init__()
        # Prior logits are a fixed buffer (uniform over clusters); the
        # variational logits are a learned parameter.
        self.register_buffer("prior_cluster_logits", torch.zeros(num_tasks, num_clusters))
        self.register_parameter(
            "variational_cluster_logits",
            torch.nn.Parameter(torch.randn(num_tasks, num_clusters)),
        )
        # Raw (pre-softplus) Gaussian observation noise.
        self.register_parameter("raw_noise", torch.nn.Parameter(torch.tensor(0.0)))
        self.num_tasks = num_tasks
        self.num_clusters = num_clusters
        self.max_plate_nesting = 1

    def pyro_guide(self, function_dist, target):
        """Sample cluster assignments from the variational distribution."""
        pyro.sample(
            self.name_prefix + ".cluster_logits",  # self.name_prefix is added by PyroGP
            pyro.distributions.OneHotCategorical(logits=self.variational_cluster_logits).to_event(1),
        )
        return super().pyro_guide(function_dist, target)

    def pyro_model(self, function_dist, target):
        """Sample cluster assignments from the prior and forward them on."""
        assignments = pyro.sample(
            self.name_prefix + ".cluster_logits",  # self.name_prefix is added by PyroGP
            pyro.distributions.OneHotCategorical(logits=self.prior_cluster_logits).to_event(1),
        )
        return super().pyro_model(function_dist, target, cluster_assignment_samples=assignments)

    def forward(self, function_samples, cluster_assignment_samples=None):
        """Observation distribution given function samples and assignments.

        During inference the assignments are supplied by ``pyro_model``;
        at prediction time they are drawn from the variational distribution.
        """
        if cluster_assignment_samples is None:
            cluster_assignment_samples = pyro.sample(
                self.name_prefix + ".cluster_logits",
                self._cluster_dist(self.variational_cluster_logits),
            )
        task_means = (function_samples.unsqueeze(-2) * cluster_assignment_samples).sum(-1)
        noise_scale = torch.nn.functional.softplus(self.raw_noise).sqrt()
        # to_event(1): the trailing dimension indexes the `t` tasks
        return pyro.distributions.Normal(loc=task_means, scale=noise_scale).to_event(1)
|
examples/07_Pyro_Integration/Clustered_Multitask_GP_Regression.ipynb
|
jrg365/gpytorch
|
mit
|
Constructing the PyroGP model
The PyroGP model is essentially the same as the model we used in the simple example, except for two changes
We now will use our more complicated ClusterGaussianLikelihood
The latent function should be vector valued to correspond to the k latent functions. As a result, we will learn a batched variational distribution, and use a IndependentMultitaskVariationalStrategy to convert the batched variational distribution into a MultitaskMultivariateNormal distribution.
|
class ClusterMultitaskGPModel(gpytorch.models.pyro.PyroGP):
    """Variational GP with ``num_functions`` batched latent functions whose
    outputs are mixed into tasks by ``ClusterGaussianLikelihood``."""

    def __init__(self, train_x, train_y, num_functions=2, reparam=False):
        num_data = train_y.size(-2)

        # Batched variational distribution over 64 shared inducing points.
        inducing_points = torch.linspace(0, 1, 64).unsqueeze(-1)
        var_dist = gpytorch.variational.CholeskyVariationalDistribution(
            num_inducing_points=inducing_points.size(-2),
            batch_shape=torch.Size([num_functions]),
        )
        # IndependentMultitaskVariationalStrategy makes the latent output a
        # MultitaskMultivariateNormal rather than a batched MVN.
        strategy = gpytorch.variational.IndependentMultitaskVariationalStrategy(
            gpytorch.variational.VariationalStrategy(self, inducing_points, var_dist),
            num_tasks=num_functions,
        )

        # Standard initialization; a unique name_prefix keeps pyro sample
        # sites distinct between model instances.
        likelihood = ClusterGaussianLikelihood(train_y.size(-1), num_functions)
        super().__init__(strategy, likelihood, num_data=num_data, name_prefix=str(time.time()))
        self.likelihood = likelihood
        self.num_functions = num_functions

        # Zero-mean GP prior with a scaled RBF kernel.
        self.mean_module = gpytorch.means.ZeroMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        """Latent GP prior evaluated at inputs ``x``."""
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))
|
examples/07_Pyro_Integration/Clustered_Multitask_GP_Regression.ipynb
|
jrg365/gpytorch
|
mit
|
Every wxPython app is an instance of wx.App. For most simple applications you can use wx.App as is. When you get to more complex applications you may need to extend the wx.App class. The "False" parameter means "don't redirect stdout and stderr to a window".
A wx.Frame is a top-level window. The syntax is wx.Frame(Parent, Id, Title). Most of the constructors have this shape (a parent object, followed by an Id). In this example, we use None for "no parent" and wx.ID_ANY to have wxWidgets pick an id for us.
wx.TextCtrl widget
To add some text to the frame, we have to use the widget wx.TextCtrl.
By default, a text box is a single-line field, but the wx.TE_MULTILINE parameter allows you to enter multiple lines of text.
In this example, we derive from wx.Frame and overwrite its __init__ method. Here we declare a new wx.TextCtrl which is a simple text edit control. Note that since MyFrame runs self.Show() inside its __init__ method, we no longer have to call frame.Show() explicitly.
|
%%writefile editor.py
#!/usr/bin/env python
import wx
class MyFrame(wx.Frame):
    """ We simply derive a new class of Frame. """
    def __init__(self, parent, title):
        # 200x100 top-level frame holding a single multi-line text control
        wx.Frame.__init__(self, parent, title=title, size=(200,100))
        self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
        self.Show(True)  # show immediately, so callers need not call Show()
# False: do not redirect stdout/stderr to a wx window
app = wx.App(False)
frame = MyFrame(None, 'Small editor')
app.MainLoop()  # blocks until all top-level windows are closed
!python editor.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Status bar & Menu bar
Typically, an application has a menu and sometimes a status bar to output messages.
Notice the wx.ID_ABOUT and wx.ID_EXIT ids. These are standard ids provided by wxWidgets (see a full list at http://docs.wxwidgets.org/2.8.12/wx_stdevtid.html). It is a good habit to use the standard ID if there is one available. This helps wxWidgets know how to display the widget in each platform to make it look more native.
|
%%writefile editor.py
#!/usr/bin/env python
# Editor frame with a status bar and a File menu (About / Exit entries).
# No event handlers are bound yet -- the menu items do nothing when selected.
import wx
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(200,100))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
self.CreateStatusBar() # A Statusbar in the bottom of the window
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.
# "&" marks the keyboard mnemonic (e.g. Alt+F opens &File);
# the third argument is the status-bar help string.
filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
filemenu.AppendSeparator()
filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.Show(True)
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
!python editor.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Event handling
Reacting to events in wxPython is called event handling. An event is when "something" happens on your application (a button click, text input, mouse movement, etc). Much of GUI programming consists of responding to events. You bind an object to an event using the Bind() method:
python
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self,parent, title=title, size=(200,100))
...
menuItem = filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
self.Bind(wx.EVT_MENU, self.OnAbout, menuItem)
This means that, from now on, when the user selects the "About" menu item, the method self.OnAbout will be executed. wx.EVT_MENU is the "select menu item" event. wxWidgets understands many other events (see the full list
at https://wiki.wxpython.org/ListOfEvents). The self.OnAbout method has the general declaration:
python
def OnAbout(self, event):
...
Here event is an instance of a subclass of wx.Event. For example, a button-click event - wx.EVT_BUTTON - is a subclass of wx.Event.
The method is executed when the event occurs. By default, this method will handle the event and the event will stop after the callback finishes. However, you can "skip" an event with event.Skip(). This causes the event to go through the hierarchy of event handlers. For example:
```python
def OnButtonClick(self, event):
if (some_condition):
do_something()
else:
event.Skip()
def OnEvent(self, event):
...
```
When a button-click event occurs, the method OnButtonClick gets called. If some_condition is true, we do_something() otherwise we let the event be handled by the more general event handler. Now let's have a look at our application:
|
%%writefile editor.py
#!/usr/bin/env python
# Editor with menu event handling: "About" opens a modal dialog,
# "Exit" closes the frame. Handlers are bound via self.Bind(wx.EVT_MENU, ...).
import os
import wx
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(200,100))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
self.CreateStatusBar() # A StatusBar in the bottom of the window
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard ids provided by wxWidgets.
menuAbout = filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
# Set events.
# Bind each menu item's EVT_MENU event to its handler method.
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.Show(True)
def OnAbout(self,e):
# A message dialog box with an OK button. wx.OK is a standard ID in wxWidgets.
# ShowModal() blocks the rest of the app until the user dismisses the dialog.
dlg = wx.MessageDialog( self, "A small text editor", "About Sample Editor", wx.OK)
dlg.ShowModal() # Show it
dlg.Destroy() # finally destroy it when finished.
def OnExit(self,e):
self.Close(True) # Close the frame.
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
!python editor.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Dialogs
Of course an editor is useless if it is not able to save or open documents. That's where Common dialogs come in. Common dialogs are those offered by the underlying platform so that your application will look exactly like a native application. Here is the implementation of the OnOpen method in MainWindow:
python
def OnOpen(self,e):
""" Open a file"""
self.dirname = ''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
f = open(os.path.join(self.dirname, self.filename), 'r')
self.control.SetValue(f.read())
f.close()
dlg.Destroy()
Explanation:
First, we create the dialog by calling the appropriate Constructor.
Then, we call ShowModal. That opens the dialog - "Modal" means that the user cannot do anything on the application until he clicks OK or Cancel.
The return value of ShowModal is the Id of the button pressed. If the user pressed OK we read the file.
|
%%writefile editor.py
#!/usr/bin/env python
# Editor with a File->Open menu item that loads a file into the text
# control via a native wx.FileDialog.
import os
import wx
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(200,100))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
self.CreateStatusBar() # A StatusBar in the bottom of the window
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard ids provided by wxWidgets.
menuOpen = filemenu.Append(wx.ID_OPEN, "&Open",
" Open text file")
menuAbout = filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
# Set events.
self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.Show(True)
def OnAbout(self,e):
# A message dialog box with an OK button. wx.OK is a standard ID in wxWidgets.
dlg = wx.MessageDialog( self, "A small text editor", "About Sample Editor", wx.OK)
dlg.ShowModal() # Show it
dlg.Destroy() # finally destroy it when finished.
def OnExit(self,e):
self.Close(True) # Close the frame.
def OnOpen(self,e):
""" Open a file"""
self.dirname = ''
# NOTE(review): wx.OPEN is the classic-wxPython flag; Phoenix (wxPython 4)
# spells it wx.FD_OPEN -- confirm the target wxPython version.
dlg = wx.FileDialog(self, "Choose a file", self.dirname,
"", "*.*", wx.OPEN)
# ShowModal() blocks until OK/Cancel; its return value is the button id.
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
f = open(os.path.join(self.dirname, self.filename), 'r')
self.control.SetValue(f.read())
f.close()
dlg.Destroy()
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
!python editor.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Working with Windows
In this section, we are going to present the way wxPython deals with windows and their contents, including building input forms and using various widgets/controls. We are going to build a small application that calculates the price of a quote.
Laying out Visual Elements
Within a frame, you'll use a number of wxWindow sub-classes to flesh out the frame's contents. Here are some of the more common elements you might want to put in your frame:
- wx.MenuBar, which puts a menu bar along the top of your frame.
- wx.StatusBar, which sets up an area along the bottom of your frame for displaying status messages, etc.
- wx.ToolBar, which puts a toolbar in your frame.
- Sub-classes of wx.Control. These are objects which represent user interface widgets (ie, visual elements which display data and/or process user input). Common examples of wx.Control objects include wx.Button, wx.StaticText, wx.TextCtrl and wx.ComboBox.
- wx.Panel, which is a container to hold your various wx.Control objects. Putting your wx.Control objects inside a wx.Panel means that the user can tab from one UI widget to the next.
All visual elements (wxWindow objects and their subclasses) can hold sub-elements. Thus, for example, a wx.Frame might hold a number of wx.Panel objects, which in turn hold a number of wx.Button, wx.StaticText and wx.TextCtrl objects, giving you an entire hierarchy of elements:  Note that this merely describes the way that certain visual elements are interrelated -- not how they are visually laid out within the frame. To handle the layout of elements within a frame, there are several options.
We are going to show the usage of wxSizers.
A sizer (that is, one of the wx.Sizer sub-classes) can be used to handle the visual arrangement of elements within a window or frame. Sizers can:
- Calculate an appropriate size for each visual element.
- Position the elements according to certain rules.
- Dynamically resize and/or reposition elements when a frame is resized.
Some of the more common types of sizers include:
- wx.BoxSizer, which arranges visual elements in a line going either horizontally or vertically.
- wx.GridSizer, which lays visual elements out into a grid-like structure.
- wx.FlexGridSizer, which is similar to a wx.GridSizer except that it allow for more flexibility in laying out visual elements.
A sizer is given a list of wx.Window objects to size, either by calling sizer.Add(window, options...), or by calling sizer.AddMany(...). A sizer will only work on those elements which it has been given. Sizers can be nested. That is, you can add one sizer to another sizer, for example to have two rows of buttons (each laid out by a horizontal wx.BoxSizer) contained within another wx.BoxSizer which places the rows of buttons one above the other.
Note: Notice that the above example does not lay out the six buttons into two rows of three columns each -- to do that, you should use a wxGridSizer.
In the following example we use two nested sizers, the main one with vertical layout and the embedded one with horizontal layout:
|
%%writefile sizer_demo.py
#!/usr/bin/env python
# Sizer demo: a vertical BoxSizer holding the text control (weight 1, grows)
# above a horizontal BoxSizer with six equally-weighted buttons (weight 0 row).
import wx
import os
class MainWindow(wx.Frame):
def __init__(self, parent, title):
self.dirname=''
# A "-1" in the size parameter instructs wxWidgets to use the default size.
# In this case, we select 200px width and the default height.
wx.Frame.__init__(self, parent, title=title, size=(200,-1))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)
self.CreateStatusBar() # A Statusbar in the bottom of the window
# Setting up the menu.
filemenu= wx.Menu()
menuOpen = filemenu.Append(wx.ID_OPEN, "&Open"," Open a file to edit")
menuAbout= filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
# Events.
self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
# Buttons are parented to the frame (self), NOT to the sizer;
# the sizer only lays them out.
self.sizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.buttons = []
for i in range(0, 6):
self.buttons.append(wx.Button(self, -1, "Button &"+str(i)))
# weight 1 + wx.EXPAND: each button shares the row width equally.
self.sizer2.Add(self.buttons[i], 1, wx.EXPAND)
# Use some sizers to see layout options
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.control, 1, wx.EXPAND)
self.sizer.Add(self.sizer2, 0, wx.EXPAND)
#Layout sizers
self.SetSizer(self.sizer)
self.SetAutoLayout(1)
self.sizer.Fit(self)
self.Show()
def OnAbout(self,e):
# Create a message dialog box
dlg = wx.MessageDialog(self, " A sample editor \n in wxPython", "About Sample Editor", wx.OK)
dlg.ShowModal() # Shows it
dlg.Destroy() # finally destroy it when finished.
def OnExit(self,e):
self.Close(True) # Close the frame.
def OnOpen(self,e):
""" Open a file"""
# NOTE(review): wx.OPEN is the classic-wxPython flag (wx.FD_OPEN in
# Phoenix) -- confirm the target wxPython version.
dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
f = open(os.path.join(self.dirname, self.filename), 'r')
self.control.SetValue(f.read())
f.close()
dlg.Destroy()
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
!python sizer_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
The sizer.Add method has three arguments. The first one specifies the control to include in the sizer. The second one is a weight factor which means that this control will be sized in proportion to other ones. For example, if you had three edit controls and you wanted them to have the proportions 3:2:1 then you would specify these factors as arguments when adding the controls. 0 means that this control or sizer will not grow. The third argument is normally wx.GROW (same as wx.EXPAND) which means the control will be resized when necessary. If you use wx.SHAPED instead, the controls aspect ratio will remain the same.
If the second parameter is 0, i.e. the control will not be resized, the third parameter may indicate if the control should be centered horizontally and/or vertically by using wx.ALIGN_CENTER_HORIZONTAL, wx.ALIGN_CENTER_VERTICAL, or wx.ALIGN_CENTER (for both) instead of wx.GROW or wx.SHAPED as that third parameter.
You can alternatively specify combinations of wx.ALIGN_LEFT, wx.ALIGN_TOP, wx.ALIGN_RIGHT, and wx.ALIGN_BOTTOM. The default behavior is equivalent to wx.ALIGN_LEFT | wx.ALIGN_TOP.
One potentially confusing aspect of the wx.Sizer and its sub-classes is the distinction between a sizer and a parent window. When you create objects to go inside a sizer, you do not make the sizer the object's parent window. A sizer is a way of laying out windows, it is not a window in itself. In the above example, all six buttons would be created with the parent window being the frame or window which encloses the buttons -- not the sizer. If you try to create a visual element and pass the sizer as the parent window, your program will crash.
Once you have set up your visual elements and added them to a sizer (or to a nested set of sizers), the next step is to tell your frame or window to use the sizer. You do this in three steps:
python
window.SetSizer(sizer)
window.SetAutoLayout(True)
sizer.Fit(window)
The SetSizer() call tells your window (or frame) which sizer to use. The call to SetAutoLayout() tells your window to use the sizer to position and size your components. And finally, the call to sizer.Fit() tells the sizer to calculate the initial size and position for all its elements. If you are using sizers, this is the normal process you would go through to set up your window or frame's contents before it is displayed for the first time.
Controls
You will find a complete list of the numerous Controls that exist in wxPython in the demo and help, but here we are going to present those most frequently used:
wxButton The most basic Control: A button showing a text that you can click. For example, here is a "Clear" button (e.g. to clear a text):
python
clearButton = wx.Button(self, wx.ID_CLEAR, "Clear")
self.Bind(wx.EVT_BUTTON, self.OnClear, clearButton)
wxTextCtrl This control let the user input text. It generates two main events. EVT_TEXT is called whenever the text changes. EVT_CHAR is called whenever a key has been pressed.
python
textField = wx.TextCtrl(self)
self.Bind(wx.EVT_TEXT, self.OnChange, textField)
self.Bind(wx.EVT_CHAR, self.OnKeyPress, textField)
For example: If the user presses the "Clear" button and that clears the text field, that will generate an EVT_TEXT event, but not an EVT_CHAR event.
wxComboBox A combobox is very similar to wxTextCtrl but in addition to the events generated by wxTextCtrl, wxComboBox has the EVT_COMBOBOX event.
wxCheckBox The checkbox is a control that gives the user true/false choice.
wxRadioBox The radiobox lets the user choose from a list of options.
Let's see an example by defining a more complex panel:
|
%%writefile example.py
# Demo panel laid out with absolute pixel positions (pos=...).
# Every control's event appends a line to the read-only logger TextCtrl.
import wx
class ExamplePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.quote = wx.StaticText(self, label="Your quote :",
pos=(20, 30))
# A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it
self.logger = wx.TextCtrl(self, pos=(300,20), size=(200,300),
style=wx.TE_MULTILINE | wx.TE_READONLY)
# A button
self.button =wx.Button(self, label="Save", pos=(200, 325))
self.Bind(wx.EVT_BUTTON, self.OnClick,self.button)
# the edit control - one line version.
self.lblname = wx.StaticText(self, label="Your name :",
pos=(20,60))
self.editname = wx.TextCtrl(self, value="Enter here your name",
pos=(150, 60), size=(140,-1))
# EVT_TEXT fires on any text change; EVT_CHAR on every key press.
self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
# the combobox Control
self.sampleList = ['friends', 'advertising', 'web search',
'Yellow Pages']
self.lblhear = wx.StaticText(self,
label="How did you hear from us ?",
pos=(20, 90))
self.edithear = wx.ComboBox(self, pos=(150, 90), size=(95, -1),
choices=self.sampleList,
style=wx.CB_DROPDOWN)
self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
self.Bind(wx.EVT_TEXT, self.EvtText,self.edithear)
# Checkbox
self.insure = wx.CheckBox(self,
label="Do you want Insured Shipment ?",
pos=(20,180))
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
# Radio Boxes
radioList = ['blue', 'red', 'yellow', 'orange', 'green',
'purple', 'navy blue', 'black', 'gray']
rb = wx.RadioBox(self, label="What color would you like ?",
pos=(20, 210), choices=radioList,
majorDimension=3,
style=wx.RA_SPECIFY_COLS)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
# --- event handlers: each logs the event payload to self.logger ---
def EvtRadioBox(self, event):
self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
def EvtComboBox(self, event):
self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
def OnClick(self,event):
self.logger.AppendText(" Click on object with Id %d\n" %event.GetId())
def EvtText(self, event):
self.logger.AppendText('EvtText: %s\n' % event.GetString())
def EvtChar(self, event):
self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
# Skip() lets the key event propagate so the control still receives it.
event.Skip()
def EvtCheckBox(self, event):
self.logger.AppendText('EvtCheckBox: %d\n' % event.Checked())
%%writefile control_demo.py
# Host window for the ExamplePanel defined in example.py.
import wx
from example import ExamplePanel
app = wx.App(False)
frame = wx.Frame(None,size=(500,400))
panel = ExamplePanel(frame)
frame.Show()
app.MainLoop()
!python control_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
The notebook
Sometimes, a form grows too big to fit on a single page. The
wx.Notebook is used in that kind of case: it allows the user to navigate quickly between a small number of pages by clicking on their associated tabs. We implement this by putting the wx.Notebook (instead of our form) into the main Frame and then adding our panels to the notebook with the AddPage method.
|
%%writefile notebook_demo.py
# Tabbed UI: a wx.Notebook inside the frame, with three ExamplePanel pages
# added via AddPage (each tab shows its own panel instance).
import wx
from example import ExamplePanel
app = wx.App(False)
frame = wx.Frame(None, title="Demo with Notebook",size=(500,400))
nb = wx.Notebook(frame)
nb.AddPage(ExamplePanel(nb), "Absolute Positioning")
nb.AddPage(ExamplePanel(nb), "Page Two")
nb.AddPage(ExamplePanel(nb), "Page Three")
frame.Show()
app.MainLoop()
!python notebook_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Improving the layout - using Sizers
Using absolute positioning is often not very satisfying: the result is ugly if the windows are not (for one reason or another) the right size. wxPython has a very rich vocabulary of objects for laying out controls.
- wx.BoxSizer is the most common and simple layout object but it permits a vast range of possibilities. Its role is roughly to arrange a set of controls in a line or in a row and rearrange them when needed (i.e. when the global size is changed).
- wx.GridSizer and wx.FlexGridSizer are two very important layout tools. They arrange the controls in a tabular layout.
Here is the sample above re-written to use sizers:
|
%%writefile example.py
# Same demo panel as before, but laid out with sizers instead of absolute
# positions: a GridBagSizer for the form (pos=(row, col) cells), nested in a
# horizontal BoxSizer next to the logger, all inside a vertical main sizer.
import wx
class ExamplePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# create some sizers
mainSizer = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(hgap=5, vgap=5)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
self.quote = wx.StaticText(self, label="Your quote: ")
grid.Add(self.quote, pos=(0,0))
# A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it
self.logger = wx.TextCtrl(self, size=(200,300), style=wx.TE_MULTILINE | wx.TE_READONLY)
# A button
self.button =wx.Button(self, label="Save")
self.Bind(wx.EVT_BUTTON, self.OnClick,self.button)
# the edit control - one line version.
self.lblname = wx.StaticText(self, label="Your name :")
grid.Add(self.lblname, pos=(1,0))
self.editname = wx.TextCtrl(self, value="Enter here your name", size=(140,-1))
grid.Add(self.editname, pos=(1,1))
self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
# the combobox Control
self.sampleList = ['friends', 'advertising', 'web search', 'Yellow Pages']
self.lblhear = wx.StaticText(self, label="How did you hear from us ?")
grid.Add(self.lblhear, pos=(3,0))
self.edithear = wx.ComboBox(self, size=(95, -1),
choices=self.sampleList,
style=wx.CB_DROPDOWN)
grid.Add(self.edithear, pos=(3,1))
self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
self.Bind(wx.EVT_TEXT, self.EvtText,self.edithear)
# add a spacer to the sizer
grid.Add((10, 40), pos=(2,0))
# Checkbox
self.insure = wx.CheckBox(self, label="Do you want Insured Shipment ?")
# span=(1,2): the checkbox occupies two grid columns.
grid.Add(self.insure, pos=(4,0), span=(1,2),
flag=wx.BOTTOM, border=5)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
# Radio Boxes
radioList = ['blue', 'red', 'yellow', 'orange', 'green', 'purple', 'navy blue', 'black', 'gray']
rb = wx.RadioBox(self, label="What color would you like ?", pos=(20, 210), choices=radioList, majorDimension=3,
style=wx.RA_SPECIFY_COLS)
grid.Add(rb, pos=(5,0), span=(1,2))
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
# Assemble: form grid + logger side by side, Save button centered below.
hSizer.Add(grid, 0, wx.ALL, 5)
hSizer.Add(self.logger)
mainSizer.Add(hSizer, 0, wx.ALL, 5)
mainSizer.Add(self.button, 0, wx.CENTER)
self.SetSizerAndFit(mainSizer)
# --- event handlers: each logs the event payload to self.logger ---
def EvtRadioBox(self, event):
self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
def EvtComboBox(self, event):
self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
def OnClick(self,event):
self.logger.AppendText(" Click on object with Id %d\n" %event.GetId())
def EvtText(self, event):
self.logger.AppendText('EvtText: %s\n' % event.GetString())
def EvtChar(self, event):
self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
event.Skip()
def EvtCheckBox(self, event):
self.logger.AppendText('EvtCheckBox: %d\n' % event.Checked())
%%writefile control_demo.py
# Host window for the sizer-based ExamplePanel defined in example.py.
import wx
from example import ExamplePanel
app = wx.App(False)
frame = wx.Frame(None,size=(500,400))
panel = ExamplePanel(frame)
frame.Show()
app.MainLoop()
!python control_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
wxpython and matplotlib
To use matplotlib to plot or show images in a panel, we rely on the matplotlib library.
|
%%writefile mpl_demo.py
#!/usr/bin/env python
#import wxversion
#wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as
FigureCanvas
from matplotlib.figure import Figure
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1,
'CanvasFrame', size=(550, 350))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
!python mpl_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Let's see how this works.
python
self.figure = Figure()
initializes the top level container for all plot elements. Everything in the
plot goes within this object, just like everything in our wx application goes into
our frame!
python
self.axes = self.figure.add_subplot(111)
Our figure can contain many subplots, but here we will only make
one. add_subplot() is what does this for us. The 111 is a grid
parameter, encoded as an integer. It means 1x1 grid, first subplot. If
you want two subplots, the number would be 2x1; the first subplot
would be 211, and the second subplot would be 212.
python
self.axes.plot(t, s)
t and s are what I chose for axis values.
They are arrays that contain values that link with each other to form our plot. These arrays
must have the same size!
This command creates and plots the t and s arrays. Since self.axes was defined as a
subplot of self.figure, this also plays a role in generating
self.figure, the container of our subplot.
python
self.canvas = FigureCanvas(self, -1, self.figure)
Finally, we have our canvas object, which paints our object onto the
screen. Simply pass in our figure and the FigureCanvas tool does the
rest.
Navigation toolbar
A useful toolbar is the navigation toolbar defined in matplotlib which allows one to explore the image.
Let's add to our previous example this toolbar.
|
%%writefile mpl_demo.py
#!/usr/bin/env python
# Same embedded-figure demo, plus matplotlib's navigation toolbar docked
# at the bottom of the frame.
# NOTE(review): wxversion is a classic-wxPython (Python 2 era) helper and is
# not available under wxPython 4/Phoenix -- confirm the target environment.
import wxversion
wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigureCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.figure import Figure
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1,
'CanvasFrame', size=(550, 350))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.add_toolbar() #add toolbar
def add_toolbar(self):
# Attach matplotlib's pan/zoom toolbar to our canvas.
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.Realize()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
!python mpl_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
It is possible to use this navigation toolbar as a starting point to add more buttons and capabilities.
This is another example from the matplotlib demo library.
|
%%writefile mpl_demo.py
#!/usr/bin/env python
# Extends matplotlib's wx navigation toolbar with a custom button that drops
# random-colored text at a random axes location on each click.
# NOTE(review): wxversion is a classic-wxPython helper, unavailable under
# wxPython 4/Phoenix -- confirm the target environment.
import wxversion
wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.backends.backend_wx import _load_bitmap
from matplotlib.figure import Figure
from numpy.random import rand
import wx
class MyNavigationToolbar(NavigationToolbar2WxAgg):
"""
Extend the default wx toolbar with your own event handlers
"""
# Class-level tool id shared by both the Phoenix and classic branches below.
ON_CUSTOM = wx.NewId()
def __init__(self, canvas, cankill):
NavigationToolbar2WxAgg.__init__(self, canvas)
# for simplicity I'm going to reuse a bitmap from wx, you'll
# probably want to add your own.
# Phoenix (wxPython 4) and classic wxPython use different tool APIs.
if 'phoenix' in wx.PlatformInfo:
self.AddTool(self.ON_CUSTOM, 'Click me',
_load_bitmap('stock_left.xpm'),
'Activate custom contol')
self.Bind(wx.EVT_TOOL, self._on_custom, id=self.ON_CUSTOM)
else:
self.AddSimpleTool(self.ON_CUSTOM, _load_bitmap('stock_left.xpm'),
'Click me', 'Activate custom contol')
self.Bind(wx.EVT_TOOL, self._on_custom, id=self.ON_CUSTOM)
def _on_custom(self, evt):
# add some text to the axes in a random location in axes (0,1)
# coords) with a random color
# get the axes
ax = self.canvas.figure.axes[0]
# generate a random location can color
x, y = tuple(rand(2))
rgb = tuple(rand(3))
# add the text and draw
ax.text(x, y, 'You clicked me',
transform=ax.transAxes,
color=rgb)
self.canvas.draw()
evt.Skip()
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1,
'CanvasFrame', size=(550, 350))
self.figure = Figure(figsize=(5, 4), dpi=100)
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# Capture the paint message
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.toolbar = MyNavigationToolbar(self.canvas, True)
self.toolbar.Realize()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
self.SetSizer(self.sizer)
self.Fit()
def OnPaint(self, event):
# Redraw the matplotlib canvas whenever wx repaints the frame.
self.canvas.draw()
event.Skip()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
!python mpl_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Matplotlib examples
We give here a few more examples to show the various capabilities.
Buttons
|
%%writefile buttons_demo.py
#!/usr/bin/env python
# matplotlib.widgets.Button demo: Next/Previous buttons cycle a sine plot
# through the frequencies in `freqs`, updating the line's y-data in place.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
freqs = np.arange(2, 20, 3)
fig, ax = plt.subplots()
# Leave room at the bottom of the figure for the two button axes.
plt.subplots_adjust(bottom=0.2)
t = np.arange(0.0, 1.0, 0.001)
s = np.sin(2*np.pi*freqs[0]*t)
l, = plt.plot(t, s, lw=2)
class Index(object):
# Current index into freqs; wrapped with % len(freqs) in the handlers.
ind = 0
def next(self, event):
self.ind += 1
i = self.ind % len(freqs)
ydata = np.sin(2*np.pi*freqs[i]*t)
l.set_ydata(ydata)
plt.draw()
def prev(self, event):
self.ind -= 1
i = self.ind % len(freqs)
ydata = np.sin(2*np.pi*freqs[i]*t)
l.set_ydata(ydata)
plt.draw()
callback = Index()
# Button axes in figure coordinates: [left, bottom, width, height].
axprev = plt.axes([0.7, 0.05, 0.1, 0.075])
axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
bnext = Button(axnext, 'Next')
bnext.on_clicked(callback.next)
bprev = Button(axprev, 'Previous')
bprev.on_clicked(callback.prev)
plt.show()
!python buttons_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Check Buttons
|
%%writefile checkbuttons_demo.py
#!/usr/bin/env python
# matplotlib.widgets.CheckButtons demo: three checkboxes toggle the
# visibility of three sine curves (2/4/6 Hz).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons
t = np.arange(0.0, 2.0, 0.01)
s0 = np.sin(2*np.pi*t)
s1 = np.sin(4*np.pi*t)
s2 = np.sin(6*np.pi*t)
fig, ax = plt.subplots()
# Initial visibility matches the (False, True, True) check states below.
l0, = ax.plot(t, s0, visible=False, lw=2)
l1, = ax.plot(t, s1, lw=2)
l2, = ax.plot(t, s2, lw=2)
plt.subplots_adjust(left=0.2)
rax = plt.axes([0.05, 0.4, 0.1, 0.15])
check = CheckButtons(rax, ('2 Hz', '4 Hz', '6 Hz'), (False, True, True))
def func(label):
# Called with the clicked checkbox's label; flip that line's visibility.
if label == '2 Hz':
l0.set_visible(not l0.get_visible())
elif label == '4 Hz':
l1.set_visible(not l1.get_visible())
elif label == '6 Hz':
l2.set_visible(not l2.get_visible())
plt.draw()
check.on_clicked(func)
plt.show()
!python checkbuttons_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Cursor
|
%%writefile cursor_demo.py
#!/usr/bin/env python
from matplotlib.widgets import Cursor
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, axisbg='#FFFFCC')
x, y = 4*(np.random.rand(2, 100) - .5)
ax.plot(x, y, 'o')
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
# set useblit = True on gtkagg for enhanced performance
cursor = Cursor(ax, useblit=True, color='red', linewidth=2)
plt.show()
!python cursor_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Rectangle selector
|
%%writefile rectsel.py
#!/usr/bin/env python
"""
Do a mouseclick somewhere, move the mouse to some destination, release
the button. This class gives click- and release-events and also draws
a line or a box from the click-point to the actual mouseposition
(within the same axes) until the button is released. Within the
method 'self.ignore()' it is checked wether the button from eventpress
and eventrelease are the same.
"""
from matplotlib.widgets import RectangleSelector
import numpy as np
import matplotlib.pyplot as plt
def line_select_callback(eclick, erelease):
    """Report the press and release coordinates of a rectangle selection."""
    start = (eclick.xdata, eclick.ydata)
    end = (erelease.xdata, erelease.ydata)
    print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (start + end))
    print(" The button you used were: %s %s" % (eclick.button, erelease.button))
def toggle_selector(event):
    """Deactivate ('q') or activate ('a') the shared RectangleSelector."""
    print(' Key pressed.')
    key = str(event.key).lower()
    if key == 'q' and toggle_selector.RS.active:
        print(' RectangleSelector deactivated.')
        toggle_selector.RS.set_active(False)
    if key == 'a' and not toggle_selector.RS.active:
        print(' RectangleSelector activated.')
        toggle_selector.RS.set_active(True)
fig, current_ax = plt.subplots() # make a new plotingrange
N = 100000 # If N is large one can see
x = np.linspace(0.0, 10.0, N) # improvement by use blitting!
plt.plot(x, +np.sin(.2*np.pi*x), lw=3.5, c='b', alpha=.7) # plot something
plt.plot(x, +np.cos(.2*np.pi*x), lw=3.5, c='r', alpha=.5)
plt.plot(x, -np.sin(.2*np.pi*x), lw=3.5, c='g', alpha=.3)
print("\n click --> release")
# drawtype is 'box' or 'line' or 'none'
toggle_selector.RS = RectangleSelector(current_ax, line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
plt.show()
!python rectsel.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Slider
|
%%writefile slider_demo.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t, s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
    """Redraw the sine trace using the current slider values."""
    l.set_ydata(samp.val * np.sin(2*np.pi*sfreq.val*t))
    fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
    """Restore both sliders to their initial values."""
    for slider in (sfreq, samp):
        slider.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
    """Recolor the trace to match the selected radio-button label."""
    l.set_color(label)
    fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
!python slider_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Span selector
|
%%writefile span_demo.py
#!/usr/bin/env python
"""
The SpanSelector is a mouse widget to select a xmin/xmax range and plot the
detail view of the selected region in the lower axes
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(211, axisbg='#FFFFCC')
x = np.arange(0.0, 5.0, 0.01)
y = np.sin(2*np.pi*x) + 0.5*np.random.randn(len(x))
ax.plot(x, y, '-')
ax.set_ylim(-2, 2)
ax.set_title('Press left mouse button and drag to test')
ax2 = fig.add_subplot(212, axisbg='#FFFFCC')
line2, = ax2.plot(x, y, '-')
def onselect(xmin, xmax):
    """Show the selected [xmin, xmax] slice of the noisy trace in the lower axes."""
    lo, hi = np.searchsorted(x, (xmin, xmax))
    hi = min(len(x) - 1, hi)  # clamp so the slice never runs past the data
    seg_x = x[lo:hi]
    seg_y = y[lo:hi]
    line2.set_data(seg_x, seg_y)
    ax2.set_xlim(seg_x[0], seg_x[-1])
    ax2.set_ylim(seg_y.min(), seg_y.max())
    fig.canvas.draw()
# set useblit True on gtkagg for enhanced performance
span = SpanSelector(ax, onselect, 'horizontal', useblit=True,
rectprops=dict(alpha=0.5, facecolor='red'))
plt.show()
!python span_demo.py
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
Further reading
http://t2mh.com/python/wxPython%20in%20Action%20(2006).pdf
https://www.tutorialspoint.com/wxpython/wxpython_tutorial.pdf
Versions
|
%load_ext version_information
%version_information wxpython
|
Lecture-widgets.ipynb
|
darioflute/CS4A
|
gpl-3.0
|
A lot of the following code is taken from or inspired by this excellent <a href="http://brandonrose.org/clustering" target="_blank">Document Clustering</a> tutorial
|
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(use_idf=True, ngram_range =(1,3))
train_data_features = vectorizer.fit_transform(clean_tweets)
terms = vectorizer.get_feature_names()
from sklearn.cluster import KMeans
num_clusters = 15
km = KMeans(n_clusters=num_clusters)
km.fit(train_data_features)
clusters = km.labels_.tolist()
clusterframe = pd.DataFrame(clusters, columns = ["cluster"]) #turns the list of clusters into a dataframe
clustered_debate = pd.concat([vinb, clusterframe], axis = 1) #combines the tweets with the clusters
clustered_debate.head()
from __future__ import print_function
print("Top terms per cluster:")
print()
#sort cluster centers by proximity to centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1] #from start to finish, reverse array
for i in range(num_clusters):
print("Cluster %d words:" % i, end='')
for ind in order_centroids[i, :5]: #will print 5 most common words
print(' %s' % terms[ind], end=',')
print()
print('Length: %d' % len(clustered_debate.Text[clustered_debate.cluster == i])) #prints cluster length, ie no of tweets in each cluster
print()
|
#vinb - Cluster Tweets.ipynb
|
maniacalbrain/Cluster-vinb-tweets
|
mit
|
With no ngram_range set (default of 1) there was a tendency for an "uber-cluster" to appear. Even at 30 clusters one of them contained over 33% of the tweets. However, the tweets in the other clusters seemed to be very strongly correlated. In the above, with ngram_range set to (1, 3) the important words seem a lot better but the clusters themselves often contain very disparate tweets.
Below are sample tweets from each of the clusters.
|
for i in range(num_clusters):
print("Cluster %d words:" % i, end='')
for ind in order_centroids[i, :5]:
print(' %s' % terms[ind], end=',')
print()
for text in pd.DataFrame(clustered_debate.Text[clustered_debate.cluster == i]).Text.head(10):
print(text)
print()
print()
|
#vinb - Cluster Tweets.ipynb
|
maniacalbrain/Cluster-vinb-tweets
|
mit
|
In the following block change the number on the first row to that of the cluster you want and change the number on the second row to the number of tweets from that cluster that you want. The below example prints out 10 tweets from cluster0
|
display = pd.DataFrame(clustered_debate.Text[clustered_debate.cluster == 0])
for text in display.Text.head(10):
print(text)
|
#vinb - Cluster Tweets.ipynb
|
maniacalbrain/Cluster-vinb-tweets
|
mit
|
We can plot these points on a scatterplot. In the following, a + means "Rain" and a - is "Sun" (no rain).
Classification is defined as the task of predicting the correct label or category of an unknown point. With two classes, we divide the data space into two halves, one for each class. So when we receive a new point, we simply find which side of the partition the point is in.
k-nearest neighbors classification
We will introduce a simple technique for classification called k-nearest neighbors classification (kNN). Before doing that, we are going to scale up our problem with a slightly more realistic dataset called Iris, which is commonly used to introduce data science tasks.
Iris is a dataset containing 150 samples of flowers of the Iris genus, belonging to three different species (Iris setosa, Iris virginica, Iris versicolor). The dataset records their species (which is the class label), along with the following features: Petal Length, Petal Width, Sepal Length, and Sepal width.
In the next cell, we import the dataset, and shuffle it.
|
import numpy as np
from sklearn.datasets import load_iris
# load iris and grab our data and labels
iris = load_iris()
labels, data = iris.target, iris.data
num_samples = len(labels) # size of our dataset
num_features = len(iris.feature_names) # number of columns/variables
# shuffle the dataset
shuffle_order = np.random.permutation(num_samples)
data = data[shuffle_order, :]
labels = labels[shuffle_order]
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
Let's view a table showing the first 20 samples.
|
label_names = np.array([iris.target_names[l] for l in labels])
table_labels = np.array(['class']+iris.feature_names).reshape((1, 1+num_features))
class_names = iris.target_names
table_data = np.concatenate([np.array(label_names).reshape(num_samples, 1), data], axis=1)[0:20]
# display table
table_full = np.concatenate([table_labels, table_data], axis=0)
display(HTML(tabulate.tabulate(table_full, tablefmt='html')))
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
For simplicity, we will restrict our attention to just the first two features, sepal width and sepal length. Let's plot the dataset.
|
# plot the original data
x, y, lab = data[:, 0], data[:, 1], labels
plt.figure(figsize=(8, 6))
plt.scatter(x, y, c=lab)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Iris dataset')
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
Suppose we are given a new point whose sepal length (x) and sepal width (y) are the following:
|
new_x, new_y = 6.5, 3.7
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
Let's plot it on the graph. What could its class be?
|
# plot the original data
x, y, lab = data[:, 0], data[:, 1], labels
plt.figure(figsize=(8, 6))
plt.scatter(x, y, c=lab)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Iris dataset')
# put the new point on top
plt.scatter(new_x, new_y, c='grey', cmap=None, edgecolor='k')
plt.annotate('?', (new_x+0.45, new_y+0.25), fontsize=20, horizontalalignment='center', verticalalignment='center')
plt.annotate("", xytext=(new_x+0.4, new_y+0.2), xy=(new_x+0.05, new_y), arrowprops=dict(arrowstyle="->"))
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
Our simple approach to predicting the new point's label is to find the point in the dataset which is closest to the new point, and copying its label.
|
# calculate the distance between the new point and each of the points in our labeled dataset
distances = np.sum((data[:,0:2] - [new_x, new_y])**2, axis=1)
# find the index of the point whose distance is lowest
closest_point = np.argmin(distances)
# take its label
new_label = labels[closest_point]
print('Predicted label: %d'%new_label)
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
That's it! That is k-nearest neighbors where we set k = 1. If k > 1, we find the k closest points and take a vote among them.
We can now plot the newly-labeled point on top of the dataset.
|
# append the newly labeled point in our dataset
x = np.append(x, new_x)
y = np.append(y, new_y)
lab = np.append(lab, new_label)
# scatter plot as before
plt.figure(figsize=(8, 6))
plt.scatter(x, y, c=lab)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Iris dataset')
plt.annotate("", xytext=(x[closest_point]+0.02, y[closest_point]+0.02), xy=(new_x-0.02, new_y-0.02), arrowprops=dict(arrowstyle="->"))
|
examples/fundamentals/classification_kNN.ipynb
|
ml4a/ml4a-guides
|
gpl-2.0
|
1. Load the LAS file with lasio
|
import lasio
l = lasio.read('../data/P-129.LAS') # Line 1.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
That's it! But the object itself doesn't tell us much — it's really just a container:
|
l
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
2. Look at the WELL section of the header
|
l.header['Well'] # Line 2.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
You can go in and find the KB if you know what to look for:
|
l.header['Parameter']['EKB']
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
3. Look at the curve data
The curves are all present in one big NumPy array:
|
l.data
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
Or we can go after a single curve object:
|
l.curves.GR # Line 3.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
And there's a shortcut to its data:
|
l['GR'] # Line 4.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
...so it's easy to make a plot against depth:
|
import matplotlib.pyplot as plt
plt.figure(figsize=(15,3))
plt.plot(l['DEPT'], l['GR'])
plt.show()
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
4. Inspect the curves as a pandas dataframe
|
l.df().head() # Line 5.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
5. Load the LAS file with welly
|
from welly import Well
w = Well.from_las('../data/P-129.LAS') # Line 6.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
welly Wells know how to display some basics:
|
w
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
And the Well object also has lasio's access to a pandas DataFrame:
|
w.df().head()
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
6. Look at welly's Curve object
Like the Well, a Curve object can report a bit about itself:
|
gr = w.data['GR'] # Line 7.
gr
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
One important thing about Curves is that each one knows its own depths — they are stored as a property called basis. (It's not actually stored, but computed on demand from the start depth, the sample interval (which must be constant for the whole curve) and the number of samples in the object.)
|
gr.basis
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
7. Plot part of a curve
We'll grab the interval from 300 m to 1000 m and plot it.
|
gr.to_basis(start=300, stop=1000).plot() # Line 8.
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
8. Smooth a curve
Curve objects are, fundamentally, NumPy arrays. But they have some extra tricks. We've already seen Curve.plot().
Using the Curve.smooth() method, we can easily smooth a curve, eg by 15 m (passing samples=True would smooth by 15 samples):
|
sm = gr.smooth(window_length=15, samples=False) # Line 9.
sm.plot()
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
9. Export a set of curves as a matrix
You can get at all the data through the lasio l.data object:
|
print("Data shape: {}".format(w.las.data.shape))
w.las.data
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
But we might want to do some other things, such as specify which curves you want (optionally using aliases like GR1, GRC, NGC, etc for GR), resample the data, or specify a start and stop depth — welly can do all this stuff. This method is also wrapped by Project.data_as_matrix() which is nice because it ensures that all the wells are exported at the same sample interval.
Here are the curves in this well:
|
w.data.keys()
keys=['CALI', 'DT', 'DTS', 'RHOB', 'SP']
w.plot(tracks=['TVD']+keys)
X, basis = w.data_as_matrix(keys=keys, start=275, stop=1850, step=0.5, return_basis=True)
w.data['CALI'].shape
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
So CALI had 12,718 points in it... since we downsampled to 0.5 m and removed the top and tail, we should have substantially fewer points:
|
X.shape
plt.figure(figsize=(15,3))
plt.plot(X.T[0])
plt.show()
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
10+. BONUS: fix the lat, lon
OK, we're definitely going to go over our budget on this one.
Did you notice that the location of the well did not get loaded properly?
|
w.location
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
Let's look at some of the header:
# LAS format log file from PETREL
# Project units are specified as depth units
#==================================================================
~Version information
VERS. 2.0:
WRAP. YES:
#==================================================================
~WELL INFORMATION
#MNEM.UNIT DATA DESCRIPTION
#---- ------ -------------- -----------------------------
STRT .M 1.0668 :START DEPTH
STOP .M 1939.13760 :STOP DEPTH
STEP .M 0.15240 :STEP
NULL . -999.25 :NULL VALUE
COMP . Elmworth Energy Corporation :COMPANY
WELL . Kennetcook #2 :WELL
FLD . Windsor Block :FIELD
LOC . Lat = 45* 12' 34.237" N :LOCATION
PROV . Nova Scotia :PROVINCE
UWI. Long = 63* 45'24.460 W :UNIQUE WELL ID
LIC . P-129 :LICENSE NUMBER
CTRY . CA :COUNTRY (WWW code)
DATE. 10-Oct-2007 :LOG DATE {DD-MMM-YYYY}
SRVC . Schlumberger :SERVICE COMPANY
LATI .DEG :LATITUDE
LONG .DEG :LONGITUDE
GDAT . :GeoDetic Datum
SECT . 45.20 Deg N :Section
RANG . PD 176 :Range
TOWN . 63.75 Deg W :Township
Look at LOC and UWI. There are two problems:
These items are in the wrong place. (Notice LATI and LONG are empty.)
The items are malformed, with lots of extraneous characters.
We can fix this in two steps:
Remap the header items to fix the first problem.
Parse the items to fix the second one.
We'll define these in reverse because the remapping uses the transforming function.
|
import re
def transform_ll(text):
    """
    Parses malformed lat and lon so they load properly.

    Takes degree/minute/second text such as ``Lat = 45* 12' 34.237" N`` and
    returns the value as a decimal-degree float (negative for S/W).
    """
    def callback(match):
        # Degrees, minutes, seconds and the compass letter captured by the regex.
        d = match.group(1).strip()
        m = match.group(2).strip()
        s = match.group(3).strip()
        c = match.group(4).strip()
        # Southern/western hemispheres become negative degree values.
        if c.lower() in ('w', 's') and d[0] != '-':
            d = '-' + d
        return ' '.join([d, m, s])
    # Non-greedy groups with optional separators tolerate the stray '*' and
    # quote characters between the numeric fields.
    pattern = re.compile(r""".+?([-0-9]+?).? ?([0-9]+?).? ?([\.0-9]+?).? +?([NESW])""", re.I)
    text = pattern.sub(callback, text)
    # NOTE(review): the module name `welly` is not imported in this cell (only
    # `from welly import Well` appears earlier) -- confirm it is in scope.
    return welly.utils.dms2dd([float(i) for i in text.split()])
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
Make sure that works!
|
print(transform_ll("""Lat = 45* 12' 34.237" N"""))
remap = {
'LATI': 'LOC', # Use LOC for the parameter LATI.
'LONG': 'UWI', # Use UWI for the parameter LONG.
'LOC': None, # Use nothing for the parameter SECT.
'SECT': None, # Use nothing for the parameter SECT.
'RANG': None, # Use nothing for the parameter RANG.
'TOWN': None, # Use nothing for the parameter TOWN.
}
funcs = {
'LATI': transform_ll, # Pass LATI through this function before loading.
'LONG': transform_ll, # Pass LONG through it too.
'UWI': lambda x: "No UWI, fix this!"
}
w = Well.from_las('../data/P-129.LAS', remap=remap, funcs=funcs)
w.location.latitude, w.location.longitude
w.uwi
|
notebooks/08_Read_and_write_LAS.ipynb
|
agile-geoscience/xlines
|
apache-2.0
|
数据理解
|
data_train.info()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
上面的数据说啥了?它告诉我们,训练数据中总共有891名乘客,但是很不幸,我们有些属性的数据不全,比如说:
- Age(年龄)属性只有714名乘客有记录
- Cabin(客舱)更是只有204名乘客是已知的
似乎信息略少啊,想再瞄一眼具体数据数值情况呢?恩,我们用下列的方法,得到数值型数据的一些分布(因为有些属性,比如姓名,是文本型;而另外一些属性,比如登船港口,是类目型。这些我们用下面的函数是看不到的):
|
data_train.describe()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
我们从上面看到更进一步的什么信息呢?
mean字段告诉我们,大概0.383838的人最后获救了,2/3等舱的人数比1等舱要多,平均乘客年龄大概是29.7岁(计算这个时候会略掉无记录的)等等…
这个时候我们可能会有一些想法了:
- 不同舱位/乘客等级可能和财富/地位有关系,最后获救概率可能会不一样
- 年龄对获救概率也一定是有影响的,毕竟前面说了,副船长还说『小孩和女士先走』呢
- 和登船港口是不是有关系呢?也许登船港口不同,人的出身地位不同?
口说无凭,空想无益。老老实实再来统计统计,看看这些属性值的统计分布吧。
|
plt.figure(figsize=(12,8))
plt.subplot(2,3,1)
sns.countplot(x='Pclass',hue='Survived',data=data_train)
plt.title('Pclass vs Survived')
plt.subplot(2,3,2)
sns.countplot(x='Sex',hue='Survived',data=data_train)
plt.title('Sex vs Survived')
plt.subplot(2,3,3)
sns.countplot(x='Embarked',hue='Survived',data=data_train)
plt.title('Embarked vs Survived')
plt.subplot(2,2,3)
sns.countplot(x='SibSp',hue='Survived',data=data_train)
plt.title('SibSp vs Survived')
plt.subplot(2,2,4)
sns.countplot(x='Parch',hue='Survived',data=data_train)
plt.title('Parch vs Survived')
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
结论:
- 钱和地位对舱位有影响,进而对获救的可能性也有影响
- 很尊重lady,lady first践行得不错。性别无疑也要作为重要特征加入最后的模型之中
- 登船港口居然是有影响的
- 单身一个人,获救的可能性还是比较低的
|
fig = plt.figure()
fig.set(alpha=0.2)
data_train.Age[data_train.Survived == 0].plot(kind='kde')
data_train.Age[data_train.Survived == 1].plot(kind='kde')
#ticket是船票编号,应该是unique的,和最后的结果没有太大的关系,先不纳入考虑的特征范畴把
#cabin只有204个乘客有值,我们先看看它的一个分布
data_train.Cabin.value_counts()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
Cabin这鬼属性,应该算作类目型的,本来缺失值就多,还如此不集中,注定是个棘手货…第一感觉,这玩意儿如果直接按照类目特征处理的话,太散了,估计每个因子化后的特征都拿不到什么权重。-----> 找技术人员去了解什么含义。。。
为方便,我们以Un表示没有的情况
数据预处理与转化
|
data_train = pd.read_csv("../kaggle_titanic/data/train.csv",index_col='PassengerId')
data_test = pd.read_csv("../kaggle_titanic/data/test.csv",index_col='PassengerId')
data = pd.concat([data_train, data_test],axis=0)
data.info()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
缺失值处理
|
data.Embarked[data.Embarked.isnull()] = data.Embarked.dropna().mode().values
data.groupby(data.Pclass).mean()['Fare']
data.Fare[data.Fare.isnull()] = 13.302889
from sklearn.ensemble import RandomForestRegressor
age_df = data[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
# 乘客分成已知年龄和未知年龄两部分
known_age = age_df[age_df.Age.notnull()].as_matrix()
unknown_age = age_df[age_df.Age.isnull()].as_matrix()
rf_age = RandomForestRegressor(n_estimators=1000,n_jobs=-1)
rf_age.fit(known_age[:,1:], known_age[:,0])
rf_predict = rf_age.predict(unknown_age[:,1:])
data.loc[data.Age.isnull(),"Age"] = rf_predict
data.loc[data.Cabin.notnull(),"Cabin"] = "Know"
data.loc[data.Cabin.isnull(),"Cabin"] = "Unknow"
data.info()
### 特征转化
dummies_Cabin = pd.get_dummies(data['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data['Pclass'], prefix= 'Pclass')
df = pd.concat([data, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
df.head(5)
### Rescale (standardize) the numeric features
from sklearn.preprocessing import StandardScaler
# Fit each scaler on a 2-D column; sklearn expects shape (n_samples, 1).
age_scaler = StandardScaler().fit(df.Age.values.reshape(-1,1))
df['Age_scaled'] = age_scaler.transform(df.Age.values.reshape(-1,1))
fare_scaler = StandardScaler().fit(df.Fare.values.reshape(-1,1))
# BUG FIX: Fare was previously transformed with the *age* scaler.
df['Fare_scaled'] = fare_scaler.transform(df.Fare.values.reshape(-1,1))
df.drop(['Age','Fare'],axis=1,inplace=True)
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
建模
|
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import sklearn.externals.joblib as joblib
X = df.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
y = df.loc[:,'Survived']
X_train = X.loc[y.notnull(),:]
y_train = y.loc[y.notnull()]
X_test = X.loc[y.isnull(),:]
# 保存训练数据,后面会用到
from cPickle import dump
data = (np.array(X_train),np.array(y_train))
with file("data/train_data","wb") as f:
dump(data, f)
lr = LogisticRegression()
lr.fit(X_train,y_train)
print lr.score(X_train,y_train)
#
y_test = lr.predict(X_test)
ret = pd.DataFrame(y_test, index=X_test.index,columns=['survival'],dtype=np.int32)
ret.to_csv("../kaggle_titanic/res/lr_0.csv")
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
提交模型,在测试集上的准确率为76.6%, 排名2700++
|
# 保存模型
joblib.dump(lr, '../kaggle_titanic/model/lr.ckpt')
# lr_read = joblib.load('../kaggle_titanic/model/lr.ckpt')
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
改进
|
import cPickle
with open("../kaggle_titanic/data/train_data","rb") as f:
X_train, y_train = cPickle.load(f)
# 因为没有测试集,我们使用KFold来评估模型的有效性
lr = LogisticRegression()
score = cross_val_score(lr,X_train,y_train,cv=5)
score.mean()
# 我们使用sklearn选择特征
from sklearn.feature_selection import SelectKBest
skb = SelectKBest(k=12)
X30 = skb.fit_transform(X_train,y_train)
lr = LogisticRegression()
score = cross_val_score(lr,X30,y_train,cv=5)
score.mean()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
特征选择无益于效果的提升,可能是因为特征本身比较少,相关性都比较强。
如果在文件分类中,特征数据多达百万,这时进行特征选择,是很必要的。
|
# Pipeline (feature selection + logistic regression) tuned with grid search.
clf = Pipeline([('skb',SelectKBest(k=100)),('lr',LogisticRegression(C=1))])
grid_param = {'skb__k':[8,9,10,11,12,13,14],
              'lr__C':[0.01, 0.1, 1, 10, 100],
              'lr__penalty':['l1','l2']}
grid = GridSearchCV(clf, grid_param, n_jobs=-1, cv=5)
grid.fit(X_train,y_train)
print(grid.best_params_)
print(grid.best_score_)
# Refit with the tuned hyper-parameters.
clf = Pipeline([('skb',SelectKBest(k=14)),('lr',LogisticRegression(C=0.1,penalty='l2'))])
clf.fit(X_train, y_train)
print(clf.score(X_train,y_train))
# BUG FIX: predict with the fitted `clf`; the original referenced an
# undefined name `clfpp`, which raised NameError.
y_test = clf.predict(X_test)
ret = pd.DataFrame(y_test, index=X_test.index,columns=['survival'],dtype=np.int32)
ret.to_csv("../kaggle_titanic/res/lr_1.csv")
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
提交结果,得分为0.78,排名2000+。
|
# 学习曲线
from sklearn.model_selection import learning_curve
pp = Pipeline([('skb',SelectKBest(k=14)),('lr',LogisticRegression(C=0.1))])
train_sizes, train_scores, test_scores = learning_curve(pp, X_train, y_train, cv=5, n_jobs=-1, train_sizes=np.linspace(0.2, 1.0, 20))
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.figure()
plt.plot(train_sizes, train_scores_mean, 'bo-')
plt.fill_between(train_sizes, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std,alpha=0.1, color='b')
plt.plot(train_sizes, test_scores_mean, 'ro-')
plt.fill_between(train_sizes, test_scores_mean-test_scores_std, test_scores_mean+test_scores_std,alpha=0.1, color='r')
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
改进2
|
from sklearn.model_selection import train_test_split
X_tra, X_val, y_tra, y_val = train_test_split(X_train,y_train, test_size=0.3, random_state=0)
lrl1 = LogisticRegression(C=1.0, penalty="l1", tol=1e-6)
lrl1.fit(X_tra, y_tra)
y_val_pred = lrl1.predict(X_val)
val_pid_set = X_val.iloc[y_val.tolist() != y_val_pred,:].index
data_train.iloc[data_train.index.isin(val_pid_set),:].head(10)
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
我们随便列一些可能可以做的优化操作:
* Age属性不使用现在的拟合方式,而是根据名称中的『Mr』『Mrs』『Miss』等的平均值进行填充。
* Age不做成一个连续值属性,而是使用一个步长进行离散化,变成离散的类目feature。
* Cabin再细化一些,对于有记录的Cabin属性,我们将其分为前面的字母部分(我猜是位置和船层之类的信息) 和 后面的数字部分(应该是房间号,有意思的事情是,如果你仔细看看原始数据,你会发现,这个值大的情况下,似乎获救的可能性高一些)。
* Pclass和Sex俩太重要了,我们试着用它们去组出一个组合属性来试试,这也是另外一种程度的细化。
* 单加一个Child字段,Age<=12的,设为1,其余为0(你去看看数据,确实小盆友优先程度很高啊)
* 如果名字里面有『Mrs』,而Parch>1的,我们猜测她可能是一个母亲,应该获救的概率也会提高,因此可以多加一个Mother字段,此种情况下设为1,其余情况下设为0
* 登船港口可以考虑先去掉试试(Q和C本来就没权重,S有点诡异)
* 把堂兄弟/兄妹 和 Parch 还有自己 个数加在一起组一个Family_size字段(考虑到大家族可能对最后的结果有影响)
* Name是一个我们一直没有触碰的属性,我们可以做一些简单的处理,比如说男性中带某些字眼的(‘Capt’, ‘Don’, ‘Major’, ‘Sir’)可以统一到一个Title,女性也一样。
进一步和优化特征,把以把得分做到0.804,基本可以排到前700+了。
模型整合
|
from sklearn.ensemble import BaggingClassifier
clf = LogisticRegression(C=1.0, penalty="l1", tol=1e-6)
bag_clf = BaggingClassifier(clf, n_estimators=20, max_samples=0.75, max_features=1.0, bootstrap=True, bootstrap_features=False, n_jobs=-1)
bag_clf.fit(X_train, y_train)
bag_clf.score(X_train, y_train)
scores = cross_val_score(bag_clf, X_train, y_train, cv=5)
scores.mean()
|
sklearn_titanic.ipynb
|
gengyj/ml-basic-course
|
gpl-3.0
|
One single point at a time, define the error between the true label and the model's prediction of the label
|
# Accumulate the sum of squared errors (SSE) -- the "cost" to be minimized.
total_error = 0.0
for x, y in zip(xs, ys):
    # The predicted model output; often called y_hat, but we'll use that later in notebook.
    y_model = m*x + b
    # Squared residual between the observed and predicted y.
    total_error += (y - y_model)**2
|
notebooks/point_by_point_intro_to_tensorflow.ipynb
|
the-deep-learners/TensorFlow-LiveLessons
|
mit
|
Define optimizer as SSE-minimizing gradient descent
|
optimizer_operation = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(total_error)
# illustrate learning rate too high:
# optimizer_operation = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(total_error)
# optimizer_operation = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(total_error)
|
notebooks/point_by_point_intro_to_tensorflow.ipynb
|
the-deep-learners/TensorFlow-LiveLessons
|
mit
|
With the computational graph designed, we initialize a session to execute it
|
with tf.Session() as session:
session.run(initializer_operation)
n_epochs = 1000 # run notebook through with 10, then run again with 1000
for iteration in range(n_epochs):
session.run(optimizer_operation) # this line executes the graph once, taking a single step toward minimizing cost
slope, intercept = session.run([m, b]) # fetch the m and b operators
slope
intercept
|
notebooks/point_by_point_intro_to_tensorflow.ipynb
|
the-deep-learners/TensorFlow-LiveLessons
|
mit
|
Generating the data
We use the make_moons method from scikit-learn, which allows to generate directly a set of points and the associated labels.
|
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.2)
colz = {0: 'dodgerblue', 1:"goldenrod"}
for pt, cl in zip(X, y):
plt.scatter(pt[0],pt[1], color=colz[cl])
plt.scatter([], [], color='dodgerblue', label='0')
plt.scatter([], [], color='goldenrod', label='1')
plt.legend()
|
notebooks/chap_2_knn.ipynb
|
hrjn/ISLR_reading_group
|
gpl-3.0
|
Writing the KNN function
|
def knn(pt, X, y, k):
    """Predict the class of ``pt`` with the k-nearest-neighbours rule.

    Parameters
    ----------
    pt : sequence of 2 floats
        The query point.
    X : array-like, shape (n_samples, 2)
        Training-point features.
    y : array-like, shape (n_samples,)
        Integer class labels of the training points.
    k : int
        Number of neighbours that vote (k >= 1).

    Returns
    -------
    int
        Majority class among the k nearest training points; argmax of
        the vote counts breaks ties in favour of the smaller class
        index (same tie-break as the original np.bincount/np.argmax).

    Notes
    -----
    The previous implementation keyed a dict on the distances
    (``dict(dists_cl)``), so any two training points at exactly the
    same distance from ``pt`` collapsed into one entry -- neighbours
    were silently dropped and, for k > 1, the repeated ``min``/``pop``
    could exhaust the dict early.  A stable argsort keeps all points
    and preserves first-come ordering for tied distances.
    """
    features = np.asarray(X, dtype=float)
    labels = np.asarray(y).astype(int)
    # Euclidean distance from pt to every training point in one
    # vectorized pass (replaces the per-row scipy `euclidean` calls).
    dists = np.linalg.norm(features - np.asarray(pt, dtype=float), axis=1)
    # Stable sort: ties keep their original order instead of being lost.
    nearest_labels = labels[np.argsort(dists, kind='stable')[:k]]
    # Majority vote among the k nearest neighbours.
    return int(np.argmax(np.bincount(nearest_labels)))
|
notebooks/chap_2_knn.ipynb
|
hrjn/ISLR_reading_group
|
gpl-3.0
|
Applying the KNN function to the moons dataset
|
# Build a dense mesh covering the data, padded by 0.5 on every side:
resolution = 0.05
x1_min, x1_max = min(X[:, 0]) - 0.5, max(X[:, 0]) + 0.5
x2_min, x2_max = min(X[:, 1]) - 0.5, max(X[:, 1]) + 0.5
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                       np.arange(x2_min, x2_max, resolution))
pts_domain = np.stack((np.ravel(xx1), np.ravel(xx2)), axis=1)
pts_domain.shape
k_knn = 3
# Colour every grid point by its knn-predicted class (one call each,
# so this is the slow part -- hence the tqdm progress bar):
for grid_pt in tqdm(pts_domain):
    predicted = knn([grid_pt[0], grid_pt[1]], X, y, k_knn)
    plt.scatter(grid_pt[0], grid_pt[1], color=colz[predicted],
                marker='.', s=3,
                alpha=0.7)
# Overlay the training points on top of the decision regions:
for features, label in zip(X, y):
    plt.scatter(features[0], features[1], color=colz[label])
# Empty scatters act as legend proxies, one entry per class:
plt.scatter([], [], color='dodgerblue', label='0')
plt.scatter([], [], color='goldenrod', label='1')
plt.legend()
plt.xlim([x1_min, x1_max])
plt.ylim([x2_min, x2_max])
plt.xlabel(r'$X_1$', fontsize=12)
plt.ylabel(r'$X_2$', fontsize=12)
|
notebooks/chap_2_knn.ipynb
|
hrjn/ISLR_reading_group
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.