code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="k71PopPzivSr"
# ## **TASK-2.Prediction using Unsupervised ML**
# + [markdown] id="esGijVBXjVgE"
# **import required libraries**
# + id="voXeE5ErhPfk"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + id="E9r0jFqdiYkM"
from sklearn.datasets import load_iris
# + colab={"base_uri": "https://localhost:8080/"} id="ShDx4vOcjSGG" outputId="f5e31ea0-c719-4d51-c0c4-a6dd3005bda9"
# + colab={"base_uri": "https://localhost:8080/"} id="ShDx4vOcjSGG" outputId="f5e31ea0-c719-4d51-c0c4-a6dd3005bda9"
# Load the bundled iris dataset and list its attributes.
iris = load_iris()
dir(iris)
# + colab={"base_uri": "https://localhost:8080/"} id="5iO6o8rOpCOL" outputId="edbc5cf3-8f90-4e73-ca8c-e886072845fd"
iris.target
# + colab={"base_uri": "https://localhost:8080/"} id="7rPXjET7t2Vc" outputId="48768016-6103-4feb-f02e-34436d1750c3"
iris.target_names
# + [markdown] id="2-JhZntUj0r6"
# **Converting into DataFrame**
# + id="oJbl-TV-jdf3"
# DataFrame with the four measurements plus the true class label.
iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
iris_df['target'] = iris.target
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="Mz8PkeJUtxAZ" outputId="34b13bec-0fe2-4a3e-c9b7-5bb8b3a2916d"
iris_df.head()
# + [markdown] id="4mDc2MeZkBHd"
# #### **Using Elbow Method to determine k**
# + id="sQP_wPLIjf9g"
# finding number of clusters
# Fix: take .values so x is a plain ndarray -- the visualisation cell below
# indexes it as x[boolean_mask, column], which raises on a DataFrame.
x = iris_df.iloc[:, [0, 1, 2, 3]].values
# importing kmeans
from sklearn.cluster import KMeans
sse = []  # within-cluster sum of squared errors (inertia) for each candidate k
for i in range(1, 15):
    kmean = KMeans(n_clusters=i)
    kmean.fit(x)
    sse.append(kmean.inertia_)
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="2oBISg3clkhc" outputId="00e3a3cb-666c-4794-8d7c-29f4e00573fa"
# Plotting the elbow curve
plt.figure(figsize=(10, 6))
plt.plot(range(1, 15), sse)
plt.xlabel('K')
plt.ylabel('Sum of squared error')
plt.title('Elbow Curve')
# + [markdown] id="SzJON-N8mNGF"
# **Therefore we can clearly see that after k=3, the decrease in error becomes almost constant.**
# **So, we choose number of clusters=3**
# + id="YOUpeyJsl8eC"
# Applying algorithm to iris dataset
kmean = KMeans(n_clusters=3)
y_kmean = kmean.fit_predict(x)
# + [markdown] id="29fqfVwApyoL"
# #### **Clusters predicted by model**
# + colab={"base_uri": "https://localhost:8080/"} id="x1AJfVdDnqPb" outputId="1ff07b1d-8aea-42f1-e336-bfca3eb1dd86"
y_kmean
# + colab={"base_uri": "https://localhost:8080/", "height": 393} id="MUlwiIFym958" outputId="fbef9e42-b68c-49fe-e287-a59a331a8924"
# Visualisation of clusters on the first two features.
# NOTE(review): KMeans cluster ids are arbitrary, so the species names used
# as legend labels are nominal -- the id-to-species mapping is not guaranteed.
plt.figure(figsize=(10, 6))
plt.scatter(x[y_kmean == 0, 0], x[y_kmean == 0, 1], s = 100, c = 'red', label = 'setosa')
plt.scatter(x[y_kmean == 1, 0], x[y_kmean == 1, 1], s = 100, c = 'blue', label = 'versicolour')
plt.scatter(x[y_kmean == 2, 0], x[y_kmean == 2, 1], s = 100, c = 'green', label = 'virginica')
# Plotting the centroids of the clusters
plt.scatter(kmean.cluster_centers_[:, 0], kmean.cluster_centers_[:, 1],
            s = 100, c = 'yellow', label = 'Centroids')
plt.legend()
# + id="ZWQFlgM2s3GQ"
| TASK-2 Prediction_using_Unsupervised_ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
# Haar-cascade detectors for frontal faces and eyes.  OpenCV does NOT raise
# when the XML path is wrong -- it silently returns an empty classifier --
# so fail fast here instead of mysteriously detecting nothing later.
face_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_eye.xml')
if face_cascade.empty() or eye_cascade.empty():
    raise IOError('Failed to load Haar cascade XML files')
# +
# Live face + eye detection from the default webcam; press Esc to quit.
cap = cv2.VideoCapture(0)
try:
    while True:
        ret, img = cap.read()
        if not ret:
            # Camera unavailable or stream ended: stop cleanly instead of
            # crashing in cvtColor on a None frame.
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Search for eyes only inside the detected face region.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        cv2.imshow('img', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # Esc key
            break
finally:
    # Release the camera and close windows even if an exception occurs.
    cap.release()
    cv2.destroyAllWindows()
| Open CV/Live-Face-Eye-Detection-using-OpenCV/Live Face eye detection .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import math, copy, os, yaml, subprocess
import initial,obstacle
import boundaryNumba as boundary
import stggNumba as stgg
import cfxyNumba as cfxy
import rhsNumba as rhs
import diffusionNumba as diffusion
import newgrdNumba as newgrd
import cip2dNumba as cip2d
import uxvxNumba as uxvx
import fricNumba as fric
import mkzeroNumba as mkzero
import hcalNumba as hcal
import matplotlib.pyplot as plt
# import matplotlib.animation as animation
# from matplotlib.animation import PillowWriter
# from matplotlib._version import get_versions as mplv
# +
# %%time
# Open Config File
# NOTE(review): driver for a 2-D shallow-water flow solver built on the
# Numba-accelerated helper modules imported above.  The bodies of the `with`
# statement and of every loop in this cell appear to have lost their
# indentation during notebook->text extraction -- restore the original
# .ipynb indentation before executing.
# Read solver configuration (grid, hydraulics, time stepping) from config.yml.
with open('config.yml','r', encoding='utf-8') as yml:
config = yaml.safe_load(yml)
# Scalar parameters from the config file (viscosity, roughness, channel
# geometry, boundary-condition switches, iteration limits, time stepping).
snu_0=float(config['snu_0'])
hmin=float(config['hmin'])
cw=float(config['cw'])
ep_alpha=float(config['ep_alpha'])
nx=int(config['nx']);ny=int(config['ny'])
chl=float(config['chl']);chb=float(config['chb'])
slope=float(config['slope']);xsize=float(config['xsize'])
qp=float(config['qp'])
snm=float(config['snm'])
g=float(config['g'])
j_west=int(config['j_west']);j_east=int(config['j_east'])
j_hdown=int(config['j_hdown'])
alh=float(config['alh']); lmax=int(config['lmax'])
etime=float(config['etime']);tuk=float(config['tuk'])
dt=float(config['dt'])
# Derived grid sizes; nx2/ny2 presumably add ghost cells -- TODO confirm.
nx1=nx+1; ny1=ny+1;nym=int(ny/2)
nx2=nx+2; ny2=ny+2
xl=chl;yl=chb
ysize=xsize/xl*yl
dx=chl/nx; dy=chb/ny; area=dx*dy
g_sqrt=np.sqrt(g)
errmax=hmin
it_out=int(tuk/dt)
# Work arrays: velocities u/v (updated copies un/vn), depth hs, water-surface
# elevation h, obstacle flags ijh, eddy viscosity ep, discharges qu/qv,
# CIP spatial gradients gux/guy/gvx/gvy, plotting buffers ux/vx/vor, etc.
prm=np.zeros([nx2,ny2])
u=np.zeros([nx2,ny2]); un=np.zeros([nx2,ny2]); v=np.zeros([nx2,ny2]); vn=np.zeros([nx2,ny2])
hs=np.zeros([nx2,ny2]); h=np.zeros([nx2,ny2]); hn=np.zeros([nx2,ny2])
ijh=np.zeros([nx2,ny2],dtype=int)
v_up=np.zeros([nx2,ny2]); hs_up=np.zeros([nx2,ny2]); u_vp=np.zeros([nx2,ny2]); hs_vp=np.zeros([nx2,ny2])
eta=np.zeros([nx2,ny2]); ep=np.zeros([nx2,ny2]); ep_x=np.zeros([nx2,ny2]); usta=np.zeros([nx2,ny2])
up=np.zeros([nx2,ny2]); vp=np.zeros([nx2,ny2])
qu=np.zeros([nx2,ny2]); qv=np.zeros([nx2,ny2]); qc=np.zeros([nx2])
x_center=np.zeros([nx2]); eta_center=np.zeros([nx2]); h_center=np.zeros([nx2])
qu_center=np.zeros([nx2]);hs_center=np.zeros([nx2])
gux=np.zeros([nx2,ny2]); guy=np.zeros([nx2,ny2]); gvx=np.zeros([nx2,ny2]); gvy=np.zeros([nx2,ny2])
gux_n=np.zeros([nx2,ny2]); guy_n=np.zeros([nx2,ny2])
gvx_n=np.zeros([nx2,ny2]); gvy_n=np.zeros([nx2,ny2])
cfx=np.zeros([nx2,ny2]); cfy=np.zeros([nx2,ny2]); qbx=np.zeros([nx2,ny2]); qby=np.zeros([nx2,ny2])
uvis=np.zeros([nx2,ny2]);uvis_x=np.zeros([nx2,ny2]);uvis_y=np.zeros([nx2,ny2])
vvis=np.zeros([nx2,ny2]);vvis_x=np.zeros([nx2,ny2]);vvis_y=np.zeros([nx2,ny2])
fn=np.zeros([nx2,ny2]);gxn=np.zeros([nx2,ny2]);gyn=np.zeros([nx2,ny2])
ux=np.zeros([nx1,ny1]);vx=np.zeros([nx1,ny1]); uv2=np.zeros([nx1,ny1])
hx=np.zeros([nx1,ny1]);hsx=np.zeros([nx1,ny1]);vor=np.zeros([nx1,ny1])
xf=np.zeros([5]);yf=np.zeros([5])
eta=initial.eta_init(eta,nx,ny,slope,dx,chl) #Initial Bed Elevation
x_center,eta_center=initial.xe_center(x_center,eta_center,nx,slope,dx,chl)
# Node coordinates for plotting (X indexed as [i, j]).
x = np.linspace(0, chl, nx1)
y = np.linspace(0, chb, ny1)
Y,X= np.meshgrid(y,x)
#print(nx,ny)
#print(np.shape(X),np.shape(hx))
#exit()
#Basic Hydraulic Values
# Uniform-flow depth from Manning's formula and the matching mean velocity.
hs0=(snm*qp/chb/math.sqrt(slope))**(3/5)
u0=qp/(hs0*chb)
qu0=u0*hs0*dy
# Contour levels for the depth, velocity and vorticity plots.
hlstep=0.002
hlmin=int(hs0*0.2/hlstep)*hlstep
hlmax=int(hs0*2./hlstep)*hlstep
levels=np.arange(hlmin,hlmax,hlstep)
#print(hlmin,hlmax,hlstep)
ulstep=0.05
ulmin=int(u0*0./ulstep)*ulstep
ulmax=int(u0*4./ulstep)*ulstep
ulevels=np.arange(ulmin,ulmax,ulstep)
vlstep=0.5
vlmax=8.; vlmin=-vlmax
vlevels=np.arange(vlmin,vlmax,vlstep)
#print(vlevels, len(vlevels))
m=len(vlevels)
#print(vlevels[0],vlevels[m-1])
# Widen the outermost levels so extreme vorticity values are not clipped.
vlevels[0]=vlevels[0]-2.
vlevels[m-1]=vlevels[m-1]+2.
#print(vlevels)
#Downstream Uniform Flow Depth
if j_east==0 and j_hdown==1:
h_down=eta_center[nx+1]+hs0
u=initial.u_init(u,u0,nx,ny); un=u #Initial Velocities
h,hs=initial.h_init(h,hs,eta,hs0,nx,ny); hn=h #Initial Depth and Water Surface Elevation
ep,ep_x=initial.ep_init(ep,ep_x,nx,ny,snu_0)
ijh=obstacle.ob_ini(ijh,nx,ny) # Setup Obstacle Cells
#print(ijh)
#for i in np.arange(0,nx+1):
# print(eta[i,nym],hs[i,nym],ep[i,nym])
# Apply boundary conditions to the initial fields.
h,hs=boundary.h_bound(h,hs,eta,nx,ny,j_west,j_east,j_hdown,h_down)
h_center[:]=h[:,nym]
hn=copy.copy(h)
u=boundary.u_bound(u,nx,ny,j_west,j_east,ijh,u0); un=u
v=boundary.v_bound(v,nx,ny,ijh) ; vn=v
# Staggered-grid interpolation of depth/velocity to u- and v-points,
# then the discharges through each cell face.
hs_up=stgg.hs_up_c(hs_up,hs,nx,ny)
hs_vp=stgg.hs_vp_c(hs_vp,hs,nx,ny)
qu,qc=rhs.qu_cal(qu,qc,u,nx,ny,dy,hs_up)
qv=rhs.qv_cal(qv,v,nx,ny,dx,hs_vp)
#print('qc=',qc)
# Scale the inflow velocity so the computed discharge matches the target qp.
qadj=qc[0]/qp
u_input=u0/qadj
#gux,guy,gvx,gvy=initial.diffs_init(gux,guy,gvx,gvy,u,v,nx,ny,dx,dy)
#gux,guy=boundary.gbound_u(gux,guy,ijh,nx,ny)
#gvx,gvy=boundary.gbound_v(gvx,gvy,ijh,nx,ny)
#print(gux)
u_vp=stgg.u_vp_c(u_vp,u,nx,ny)
hs_vp=stgg.hs_vp_c(hs_vp,hs,nx,ny)
v_up=stgg.v_up_c(v_up,v,nx,ny)
hs_up=stgg.hs_up_c(hs_up,hs,nx,ny)
#print(hs_up)
time=0.
icount=0
#ux,vx,hx,uv2=uxvx.uv(ux,vx,uv2,hx,u,v,h,nx,ny)
nfile=0
# os.system("del /Q .\png\*.png")
iskip=1
l=0
########### Main ############
# Time-marching loop: every it_out steps render a vorticity/vector frame to
# a numbered PNG; each step runs the friction update, an iterative depth
# solve (up to lmax sweeps), viscous diffusion, and CIP advection.
fig, ax = plt.subplots(figsize = (xsize, ysize))
while time<= etime:
usta,ep,ep_x=fric.us_cal(usta,ep,ep_x,u,v,hs,nx,ny,snm,g_sqrt,hmin,ep_alpha)
# ---- Periodic output block (every it_out steps) ----
if icount%it_out==0:
print('time=',np.round(time,3),l)
# print(np.round(qc[0:20],3))
# print('qc=',qc)
nfile=nfile+1
# print(np.round(vn,5))
ux,vx,uv2,hx,hsx=uxvx.uv(ux,vx,uv2,hx,hsx,u,v,h,hs,nx,ny)
vor=uxvx.vortex(vor,ux,vx,nx,ny,dx,dy)
# im=plt.contourf(X, Y, hx)
# im=plt.colorbar()
# ims.append(im)
# print('X=',np.round(X,5),'Y=',np.round(Y,5))
# print('ux=',np.round(ux,5),'vx=',np.round(ux,5))
# # fig, ax = plt.subplots(figsize = (xsize, ysize))
cont=ax.contourf(X, Y, vor, vlevels, cmap='coolwarm')
vect=ax.quiver(X[::iskip], Y[::iskip], ux[::iskip], vx[::iskip], \
width=0.002,headwidth=3)
cb = fig.colorbar(cont)
t = ax.set_title('{:.2f}'.format(np.round(time,3)))
# Shade obstacle cells as green quads.
for i in np.arange(1,nx+1):
for j in np.arange(1,ny+1):
if ijh[i,j]>=0.1:
xf[0]=x[i-1];yf[0]=y[j-1]
xf[1]=x[i] ;yf[1]=y[j-1]
xf[2]=x[i] ;yf[2]=y[j]
xf[3]=x[i-1];yf[3]=y[j]
xf[4]=x[i-1];yf[4]=y[j-1]
ax.fill(xf,yf,color = "green")
# fname="./png/" + 'f%04d' % nfile + '.png'
fname='f%04d' % nfile + '.png'
im=plt.savefig(fname)
# plt.clf()
# plt.close()
cb.remove()
plt.cla()
#1d h_center[:]=h[:,nym];hs_center[:]=hs[:,nym]
#1d qu_center[:]=qu[:,nym]
#1d im=plt.title("Longitudinal Qu")
#1d im=plt.xlabel("x(m)"); im=plt.ylabel("Qu")
#1d im=plt.plot(x_center,eta_center,'r')
#1d im=plt.plot(x_center,qu_center,'b')
#1d im=plt.plot(x_center,hs_center,'g')
#1d ims.append(im)
#Velocities in Non Advection Phase
# Iterate the pressure/continuity solve until the depth change err < errmax
# or lmax sweeps have been done.
l=0
while l<lmax:
v_up=stgg.v_up_c(v_up,vn,nx,ny)
hs_up=stgg.hs_up_c(hs_up,hs,nx,ny)
cfx=cfxy.cfxc(cfx,nx,ny,hs,un,g,snm,v_up,hs_up)
un=rhs.un_cal(un,u,nx,ny,dx,cfx,hn,g,dt)
un=boundary.u_bound(un,nx,ny,j_west,j_east,ijh,u_input)
qu,qc=rhs.qu_cal(qu,qc,un,nx,ny,dy,hs_up)
u_vp=stgg.u_vp_c(u_vp,un,nx,ny)
hs_vp=stgg.hs_vp_c(hs_vp,hs,nx,ny)
cfy=cfxy.cfyc(cfy,nx,ny,hs,vn,g,snm,u_vp,hs_vp)
vn=rhs.vn_cal(vn,v,nx,ny,dy,cfy,hn,g,dt)
vn=boundary.v_bound(vn,nx,ny,ijh)
qv=rhs.qv_cal(qv,vn,nx,ny,dx,hs_vp)
hn,hs,err=hcal.hh(hn,h,hs,eta,qu,qv,ijh,area,alh,hmin,nx,ny,dt)
hn,hs=boundary.h_bound(hn,hs,eta,nx,ny,j_west,j_east,j_hdown,h_down)
# for i in np.arange(1,40):
# for j in np.arange(10,11):
# print(i,j, np.round(hn[i,j]-h[i,j],8))
# print('l,err=',l,err)
if err<errmax:
break
l=l+1
#Diffusion
un=diffusion.diff_u(un,uvis,uvis_x,uvis_y,nx,ny,dx,dy,dt,ep,ep_x,cw)
un=boundary.u_bound(un,nx,ny,j_west,j_east,ijh,u_input)
vn=diffusion.diff_v(vn,vvis,vvis_x,vvis_y,nx,ny,dx,dy,dt,ep,ep_x)
vn=boundary.v_bound(vn,nx,ny,ijh)
#Differentials in Non Advection Phase
gux,guy=newgrd.ng_u(gux,guy,u,un,nx,ny,dx,dy)
gux,guy=boundary.gbound_u(gux,guy,ijh,nx,ny)
gvx,gvy=newgrd.ng_v(gvx,gvy,v,vn,nx,ny,dx,dy)
gvx,gvy=boundary.gbound_v(gvx,gvy,ijh,nx,ny)
#Advection Phase
# CIP advection of u, then of v, each followed by its boundary conditions.
fn,gxn,gyn=mkzero.z0(fn,gxn,gyn,nx,ny)
v_up=stgg.v_up_c(v_up,v,nx,ny)
fn,gxn,gyn=cip2d.u_cal1(un,gux,guy,u,v_up,fn,gxn,gyn,nx,ny,dx,dy,dt)
un,gux,guy=cip2d.u_cal2(fn,gxn,gyn,u,v_up,un,gux,guy,nx,ny,dx,dy,dt)
un=boundary.u_bound(un,nx,ny,j_west,j_east,ijh,u_input)
gux,guy=boundary.gbound_u(gux,guy,ijh,nx,ny)
fn,gxn,gyn=mkzero.z0(fn,gxn,gyn,nx,ny)
u_vp=stgg.u_vp_c(u_vp,u,nx,ny)
fn,gxn,gyn=cip2d.v_cal1(vn,gvx,gvy,u_vp,v,fn,gxn,gyn,nx,ny,dx,dy,dt)
vn,gvx,gvy=cip2d.v_cal2(fn,gxn,gyn,u_vp,v,vn,gvx,gvy,nx,ny,dx,dy,dt)
vn=boundary.v_bound(vn,nx,ny,ijh)
gvx,gvy=boundary.gbound_v(gvx,gvy,ijh,nx,ny)
# Promote the updated fields to the current state.
h=copy.copy(hn); u=copy.copy(un); v=copy.copy(vn)
#Time Step Update
time=time+dt
icount=icount+1
#1d ani = animation.ArtistAnimation(fig, ims)
#1d plt.show()
#1d ani.save('flow.gif',writer='imagemagick')
#1d ani.save('flow.mp4',writer='ffmpeg')
# subprocess.call('ffmpeg -framerate 30 -i png/f%4d.png -r 60 -an -vcodec libx264 -pix_fmt yuv420p animation.mp4', shell=True)
# os.system("ffmpeg -i animation.mp4 animation.gif -loop 0")
| main_Numba.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%capture
# needed to support mongo+srv connection strings
# # !/home/yprift01/jupyter/notebook-env/bin/python -m pip install --upgrade pip
# !pip install dnspython
# !pip install pymongo
# +
# %%capture
# MongoDB client for the 4chan scraping database (read-only user).
from pymongo import MongoClient
# Fixed period analysis - change these to extend/restrict the period
# Date as a yyyymmddhhMMss string
#date_from = '20220409000000'
#date_to = '20220419000000'
# connecting to the mongodb cluster using a read-only user
## REMOTE REPLICA SET
client = MongoClient("mongodb+srv://<user>:<pass>@rs.prifti.us/?ssl=false&authSource=chan")
## LOCAL SINGLE INSTANCE
## 192.168.1.110 -- localhost
# client = MongoClient("mongodb://<user>:<pass>@localhost:27777/?ssl=false&serverSelectionTimeoutMS=5000&connectTimeoutMS=10000&authSource=chan&authMechanism=SCRAM-SHA-256")
# connection to chan database
db = client.chan
# the threads collection contains API data collections from the 4chan api and 4pleb api
threads = db.threads
# the 'live-4chan' collection contains the OXPath data collection
livechan = db['y-live-4chan']
# + code_folding=[]
# # %%capture
import pandas as pd
# Analysis window as yyyymmddHHMM strings (lexicographic order matches
# chronological order, so the $gt/$lt string comparisons below are valid).
date_from = '202105010000'
date_to = '202111010000'
# Aggregation: drop two known outlier threads, project each thread's most
# recent scan timestamp, keep threads last scanned inside the window,
# newest first.
pipeline = [
{"$match": { "$and": [
{ "url": {"$ne": "https://boards.4chan.org/pol/thread/124205675" }} ,
{ "url": {"$ne": "https://boards.4chan.org/pol/thread/259848258" }}
] } }
,{"$project": {"_id": 1, "last_scan": {"$last": "$scans"}, "url": 1 } }
,{"$match": { "$and": [ { "last_scan": {"$gt": date_from } },
{ "last_scan": {"$lt": date_to } } ] } }
,{"$sort": {"last_scan": -1 } }
]
# options = { "allowDiskUse": True }
dataset = pd.DataFrame(livechan.aggregate(pipeline, allowDiskUse=True))
# +
date_from = '202105010000'
date_to = '202111010000'
# Fix: `dt` was used here before its import (datetime is only imported in a
# later cell), and the 12-digit yyyymmddHHMM strings above do not match the
# old "%Y%m%d%H%M%S" format -- strptime would raise ValueError.
from datetime import datetime as dt
date_format = "%Y%m%d%H%M"
dt_to = dt.strptime(date_to, date_format)
dt_from = dt.strptime(date_from, date_format)
# Number of full days in the window, used later for per-day averages.
nr_days = (dt_to - dt_from).days - 1
print(nr_days)
# Per-scan discovery instances for two specific threads: keep first/last
# scan per thread, unwind all scans, keep only scans inside the window.
# NOTE(review): the '!!!259848258!!!' url looks like a placeholder/typo --
# confirm the intended thread id.
instances_pipeline = [
    {"$match": { "$or": [
        { "url": {"$eq": "https://boards.4chan.org/pol/thread/124205675" }}
        ,{ "url": {"$eq": "https://boards.4chan.org/pol/thread/!!!259848258!!!" }}
    ] } }
    ,{"$project": {"_id": 1, "scans": 1, "last_scan": {"$last": "$scans"},
                   "first_scan": {"$first": "$scans"}, "url": 1 } }
    ,{"$unwind": "$scans"}
    ,{"$match": { "$and": [ { "scans": {"$gt": date_from } },
                            { "scans": {"$lt": date_to } } ] } }
    ,{"$sort": {"last_scan": -1 } }
]
instances_dataset = pd.DataFrame(livechan.aggregate(instances_pipeline, allowDiskUse=True))
# -
instances_dataset
# +
date_from = '202105010000'
date_to = '202111010000'
# Fix: import `dt` before use, and use a 12-digit format matching the
# yyyymmddHHMM strings above (the old "%Y%m%d%H%M%S" raised ValueError).
from datetime import datetime as dt
date_format = "%Y%m%d%H%M"
dt_to = dt.strptime(date_to, date_format)
dt_from = dt.strptime(date_from, date_format)
nr_days = (dt_to - dt_from).days - 1
print(nr_days)
# Reply counts per thread: drop the two outlier threads, keep threads whose
# last extraction falls in the window, take the latest payload, and project
# the size of its replies array.
posts_pipeline = [
    {"$match": { "$and": [
        { "url": {"$ne": "https://boards.4chan.org/pol/thread/124205675" }}
        ,{ "url": {"$ne": "https://boards.4chan.org/pol/thread/259848258" }}
        ,{ "last_extraction": {"$ne": None} }
    ] } }
    ,{"$match": { "$and": [ { "last_extraction": {"$gt": date_from } },
                            { "last_extraction": {"$lt": date_to } } ] } }
    ,{"$project": {"_id": 1, "data": {"$last": "$data"}, "url": 1, "last_extraction":1 } }
    ,{"$match": {"$and": [ {"data.payload": {"$ne": None}}, {"data.payload.thread": {"$ne": None}} ] } }
    ,{"$project": {"_id": 1, "data": {"$last": "$data.payload.thread"}, "url": 1 } }
    ,{"$match": {"data.replies": {"$ne": None}}}
    ,{"$project": {"_id": 1, "nr_replies": {"$size": "$data.replies"}, "url": 1 } }
    # ,{"$sort": {"last_extraction": -1 } }
    # ,{"$limit": 10}
]
posts_dataset = pd.DataFrame(livechan.aggregate(posts_pipeline, allowDiskUse=True))
posts_dataset
# +
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime as dt
# fig = plt.figure(figsize=(12,8), dpi= 100, facecolor='w', edgecolor='k')
plt.rcParams["figure.figsize"] = (16,8)
# Fix: the window bounds are 12-digit yyyymmddHHMM strings, so the old
# "%Y%m%d%H%M%S" format made strptime raise ValueError.
date_format = "%Y%m%d%H%M"
dt_to = dt.strptime(date_to, date_format)
dt_from = dt.strptime(date_from, date_format)
nr_days = (dt_to - dt_from).days - 1
# Hour-of-day (HHMM) of each thread's last scan.
dataset['hour'] = dataset['last_scan'].str[8:12]
temp_df = dataset[['hour', 'url']].copy()
temp_df['hour'] = pd.to_datetime(temp_df['hour'], format="%H%M")
temp_df.set_index('hour', drop=False, inplace=True)
# Threads discovered per one-hour bucket, averaged over the window length.
temp_df = temp_df['hour'].groupby(pd.Grouper(freq='60Min')).count()
df = pd.DataFrame({"hour": temp_df.index, "count": temp_df.values})
df['count'] = round(df['count']/nr_days, 0)
# df.set_index('hour', drop=False, inplace=True)
# Fix: mean/std/median were undefined names (never imported); use the
# pandas Series methods instead (note .std() is the sample std, ddof=1).
calc_mean = df['count'].mean()
calc_std = df['count'].std()
calc_median = df['count'].median()
calc_min = df['count'].min()
calc_max = df['count'].max()
ax = df.plot(kind='bar', color='r', y = 'count', x='hour')
ticklabels = df['hour'].apply(lambda x: x.strftime('%H:%Mh'))
ax.xaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticklabels))
# Add title and axis names
title = 'Hourly number of discovered threads over the {} days between {} and {}. mean:{:.2f}, median:{:.0f} ,std: {:.2f}, min: {}, max: {}'.format(
    nr_days, dt_from.strftime("%Y-%m-%d"), dt_to.strftime("%Y-%m-%d"), calc_mean, calc_median, calc_std, calc_min, calc_max)
plt.title(title)
plt.ylabel('Nr. of discovered threads')
plt.xlabel('Hour of the day UTC')
plt.show()
#dataset[['extraction_minute', 'url']].groupby(['extraction_minute']).agg(['count']).hist()
#dataset[['extraction_hour', 'url']].groupby(['extraction_hour']).agg(['count'])#.hist()
# +
# Headline totals for the three datasets built above.
print('Total number of threads: {}'.format(len(dataset)))
print('Total number of discovery instances: {}'.format(len(instances_dataset)))
print('Total number of posts: {}'.format(sum(posts_dataset['nr_replies'])))
| jupyter/4chan-data-campaign.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="rF7pYIhsLtui"
# # SIT742: Modern Data Science
# **(Week 07: Big Data Platform (II))**
#
# ---
# - Materials in this module include resources collected from various open-source online repositories.
# - You are free to use, change and distribute this package.
# - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues)
#
# Prepared by **SIT742 Teaching Team**
#
# ---
#
#
#
# ## Session 7B - Spark MLlib (1): Data Types
#
#
# The purpose of this session is to demonstrate different [coefficient and linear regression](https://statisticsbyjim.com/glossary/regression-coefficient/).
#
#
# ### Content
#
# ### Part 1 Vectors
#
# 1.1 Dense and Sparse Vectors
#
# 1.2 Labeled Points
#
#
# ### Part 2 Matrix Data Types
#
# 2.1 Local Matrix
#
# 2.2 Row Matrix
#
# 2.3 Indexed Row Matrix
#
# 2.4 Coordinate Matrix
#
# 2.5 Block Matrix
#
# ### Part 3 Matrix Conversions
#
# 3.1 Indexed Row Matrix Conversions
#
# 3.2 Coordinate Matrix Conversions
#
# 3.3 Block Matrix Conversions
#
# + [markdown] colab_type="text" id="fxE6Q1h2Ltuj"
# # Part1. Vectors
#
# + [markdown] colab_type="text" id="iUYZnCRshnLs"
# ## 1.1.Dense and Sparse Vectors
#
# Spark has many libraries, namely under MLlib (Machine Learning Library). It allows for quick and easy scalability of practical machine learning.
#
# In this lab exercise, you will learn about the basic Data Types that are used in Spark MLlib. This lab will help you develop the building blocks required to continue developing knowledge in machine learning with Spark.
#
# Import the following libraries: <br>
# <ul>
# <li> numpy as np </li>
# <li> scipy.sparse as sps </li>
# <li> Vectors from pyspark.mllib.linalg </li>
# </ul>
# + colab={} colab_type="code" id="LMSzEzOPQ94_"
# Install Java 8 and a local Spark 3.1.1 distribution (Colab-style setup),
# then let findspark make pyspark importable from that SPARK_HOME.
# !apt-get update
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !wget -q http://apache.osuosl.org/spark/spark-3.1.1/spark-3.1.1-bin-hadoop3.2.tgz
# !tar xf spark-3.1.1-bin-hadoop3.2.tgz
# !pip install -q findspark
import os
# These paths must match the JDK package and Spark tarball installed above.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-3.1.1-bin-hadoop3.2"
import findspark
findspark.init()
# + colab={} colab_type="code" id="5OKTJqzVLtuk"
import numpy as np
import scipy.sparse as sps
from pyspark.mllib.linalg import Vectors
import time
# + [markdown] colab_type="text" id="-GjbNjCnLtuo"
# A local vector has integer-typed and 0-based indices and double-typed values, stored on a single machine. MLlib supports two types of **local vectors**: **dense** and **sparse**. A dense vector is backed by a double array representing its entry values, while a sparse vector is backed by two parallel arrays: indices and values. For example, a vector (1.0, 0.0, 3.0) can be represented in dense format as [1.0, 0.0, 3.0] or in sparse format as (3, [0, 2], [1.0, 3.0]), where 3 is the size of the vector.
#
#
# First, we will be dealing with <b>Dense Vectors</b>. For example, we assume that the dense vectors will be modeled having the values: <b>8.0, 312.0, -9.0, 1.3</b>. There are 2 types of <b>dense vectors</b> that we can create.
#
# The first dense vector we will create is as easy as creating a numpy array, which is using the np.array function, create a dense vector called dense_vector1.
#
# + [markdown] colab_type="text" id="x_t6dCTJLtuo"
# Note: numpy's array function takes an array as input
# + colab={} colab_type="code" id="spj1u4WqLtup"
# Dense vector #1: a NumPy ndarray holding the four sample values.
dense_vector1 = np.asarray((8.0, 312.0, -9.0, 1.3))
print(dense_vector1)
type(dense_vector1)
# + [markdown] colab_type="text" id="3qv14ZuFLtut"
# The second <b>dense vector</b> is easier than the first, and is made by creating an <b>array</b>, which is to create a <b>dense vector</b> called <b>dense_vector2</b>
# + colab={} colab_type="code" id="NgHqVADALtut"
# Dense vector #2: a plain Python list with the same values.
dense_vector2 = list((8.0, 312.0, -9.0, 1.3))
print(dense_vector2)
type(dense_vector2)
# + [markdown] colab_type="text" id="dTXUbxQKLtux"
# Next, we will be dealing with <b>sparse vectors</b>. There are 2 types of <b>sparse vectors</b> we can create. <br>
# The sparse vectors we will be creating will follow these values: <b> 7.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 6.5 </b>
# + [markdown] colab_type="text" id="J_bkfTv_Ltuy"
# First, create a <b>sparse vector</b> called <b>sparse_vector1</b> using Vector's <b>sparse</b> function. <br>
# Parameters to Vector.sparse: <br>
# <ul>
# <li>1st parameter: Size of the sparse vector</li>
# <li>2nd parameter: Indicies of array</li>
# <li>3rd parameter: Values placed where the indices are</li>
# </ul>
# + colab={} colab_type="code" id="xQjlxLH5Ltuz"
# Sparse vector via pyspark's Vectors.sparse:
#   size of the vector = 10
#   indices of the non-zero entries = [0, 3, 5, 9] (because 7.0 sits at
#   index 0, 2.0 at index 3, 1.0 at index 5 and 6.5 at index 9)
#   values placed at those indices = [7.0, 2.0, 1.0, 6.5]
sparse_vector1 = Vectors.sparse(10, [0, 3, 5, 9], [7.0, 2.0, 1.0, 6.5])
print(sparse_vector1)
type(sparse_vector1)
# + [markdown] colab_type="text" id="VFPkxm3WLtu3"
# Next we will create a <b>sparse vector</b> called <b>sparse_vector2</b> using a single-column SciPy <b>csc_matrix</b> <br> <br>
# The inputs to sps.csc_matrix are: <br>
# <ul>
# <li>1st: A tuple consisting of the three inputs:</li>
# <ul>
# <li>1st: Data Values (in a numpy array) (values placed at the specified indices)</li>
# <li>2nd: Indicies of the array (in a numpy array) (where the values will be placed)</li>
# <li>3rd: Index pointer of the array (in a numpy array)</li>
# </ul>
# <li>2nd: Shape of the array (#rows, #columns) Use 10 rows and 1 column</li>
# <ul>
# <li>shape = (\_,\_)</li>
# </ul>
# </ul> <br>
# Note: You may get a deprecation warning. Please Ignore it.
# + colab={} colab_type="code" id="Kj5AQlFbLtu4"
# Sparse vector #2: the 10-element vector
# [7.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 6.5]
# stored as a single-column SciPy CSC matrix:
#   data   -> the non-zero values
#   rows   -> row positions of those values
#   indptr -> [0, 4]: column 0 spans entries data[0:4]
#   shape  -> 10 rows x 1 column
csc_data = np.array([7.0, 2.0, 1.0, 6.5])
csc_rows = np.array([0, 3, 5, 9])
csc_indptr = np.array([0, 4])
sparse_vector2 = sps.csc_matrix((csc_data, csc_rows, csc_indptr), shape = (10, 1))
print (sparse_vector2)
print (type(sparse_vector2))
print (sparse_vector2.toarray())
# + [markdown] colab_type="text" id="xwlPRhfmrk06"
# You can also try the **sps.csr_matrix** function. Its syntax is similar to that of **csc_matrix**; just the definition of the shape is different.
# + colab={} colab_type="code" id="6VgQG_uMrjSU"
# Sparse vector #3: the same values laid out as a 1x10 CSR (row-major)
# matrix, so indptr now delimits the single row and the index array holds
# column positions.
csr_data = np.array([7.0, 2.0, 1.0, 6.5])
csr_cols = np.array([0, 3, 5, 9])
csr_indptr = np.array([0, 4])
sparse_vector3 = sps.csr_matrix((csr_data, csr_cols, csr_indptr), shape = (1, 10))
print (sparse_vector3)
print (type(sparse_vector3))
print (sparse_vector3.toarray())
# + [markdown] colab_type="text" id="9OUCr_OXLtu8"
# <a id = "lpoints"></a>
# ### <span style="color:#0b486b">1.2 Labeled Points</span>
#
# So the next data type will be Labeled points. A labeled point is a local vector, either dense or sparse, associated with a label/response. In MLlib, labeled points are used in supervised learning algorithms. We use a double to store a label, so we can use labeled points in both regression and classification. For binary classification, a label should be either 0 (negative) or 1 (positive). For multiclass classification, labels should be class indices starting from zero: 0, 1, 2, ....
#
# Start by importing the following libraries: <br>
# <ul>
# <li>SparseVector from pyspark.mllib.linalg</li>
# <li>LabeledPoint from pyspark.mllib.regression</li>
# </ul>
# Remember that this data type is mainly used for **classification algorithms in supervised learning**.<br>
# + colab={} colab_type="code" id="7WNcBzIuLtu9"
from pyspark.mllib.linalg import SparseVector
from pyspark.mllib.regression import LabeledPoint
# + [markdown] colab_type="text" id="-zoLOpJxLtvA"
# Remember that with a lableled point, we can create binary or multiclass classification. In this lab, we will deal with binary classification for ease. <br> <br>
# The <b>LabeledPoint</b> function takes in 2 inputs:
# <ul>
# <li>1st: Label of the Point. In this case (for binary classification), we will be using <font color="green">1.0</font> for <font color="green">positive</font> and <font color="red">0.0</font> for <font color="red">negative</font></li>
# <li>2nd: Vector of features for the point (We will input a Dense or Sparse Vector using any of the methods defined in the <b>Dense and Sparse Vectors</b> section of this lab.</b>
# </ul>
# + [markdown] colab_type="text" id="6tgapsamLtvB"
# Using the LabelPoint class, create a <b>dense</b> feature vector with a <b>positive</b> label called <b>pos_class</b> with the values: <b>5.0, 2.0, 1.0, 9.0</b>
# + colab={} colab_type="code" id="DNYHl94wLtvB"
# Positive-label example: 1.0 means the positive class,
# [5.0, 2.0, 1.0, 9.0] is the dense feature vector for the point.
pos_class = LabeledPoint(1.0, [5.0, 2.0, 1.0, 9.0])
print(pos_class)
type(pos_class)
# + [markdown] colab_type="text" id="bM4Q3-kELtvG"
# Next we will create a <b>sparse</b> feature vector with a <b>negative</b> label called <b>neg_class</b> with the values: <b>1.0, 0.0, 0.0, 4.0, 0.0, 2.0</b>
# + colab={} colab_type="code" id="7SPDsrF3LtvH"
# Negative-label example: features [1.0, 0, 0, 4.0, 0, 2.0] expressed as
# SparseVector(size=6, indices=[0, 3, 5], values=[1.0, 4.0, 2.0]).
neg_class = LabeledPoint(0.0, SparseVector(6, [0, 3, 5], [1.0, 4.0, 2.0]))
print(neg_class)
type(neg_class)
# + [markdown] colab_type="text" id="fxIOHyJkLtvN"
# ---
# ## <span style="color:#0b486b">2. Matrix Data Types</span>
#
#
# In this next section, we will be dealing creating the following matrices:
# <ul>
# <li>Local Matrix</li>
# <li>Row Matrix</li>
# <li>Indexed Row Matrix</li>
# <li>Coordinate Matrix</li>
# <li>Block Matrix</li>
# </ul>
#
# Throughout this section, we will be modelling the following matricies: <br>
#
# <center>For a Dense Matrix:</center> <br>
#
# $$
# \begin{pmatrix}
# 1.00 & 6.00 & 3.00 & 0.00 \\
# 3.00 & 2.00 & 5.00 & 1.00 \\
# 9.00 & 4.00 & 0.00 & 3.00
# \end{pmatrix}
# $$
#
# <center>For a Sparse Matrix:</center> <br>
#
# $$
# \begin{pmatrix}
# 1.00 & 0.00 & 3.00 & 0.00 \\
# 3.00 & 0.00 & 0.00 & 1.00 \\
# 0.00 & 4.00 & 0.00 & 0.00
# \end{pmatrix}
# $$
# + [markdown] colab_type="text" id="pRH3OxKdLtvN"
# <a id = "lm"></a>
# ### <span style="color:#0b486b">2.1 Local Matrix</span>
#
# A local matrix has integer-typed row and column indices and double-typed values, stored on a single machine. MLlib supports dense matrices, whose entry values are stored in a single double array in column-major order, and sparse matrices, whose non-zero entry values are stored in the Compressed Sparse Column (CSC) format in column-major order.
#
# Import the following Library:
# <ul>
# <li>pyspark.mllib.linalg as laMat</li>
# </ul>
# + colab={} colab_type="code" id="Y6ZCp5fPLtvO"
import pyspark.mllib.linalg as laMat
# + [markdown] colab_type="text" id="ooA-s7JkLtvP"
# Create a dense local matrix called <b>dense_LM</b> <br>
# The inputs into the <b>laMat.Matrices.dense</b> function are:
# <ul>
# <li>1st: Number of Rows</li>
# <li>2nd: Number of Columns</li>
# <li>3rd: Values in an array format (Read as Column-Major)</li>
# </ul>
# + colab={} colab_type="code" id="kHPxNCs_LtvP"
# Dense local matrix: 3 rows, 4 columns,
# entry values given in COLUMN-major order (top-down, then left-right),
# i.e. columns [1,3,9], [6,2,4], [3,5,0], [0,1,3].
dense_LM = laMat.Matrices.dense(3,4, [1.0, 3.0, 9.0, 6.0, 2.0, 4.0, 3.0, 5.0, 0.0, 0.0, 1.0, 3.0])
print(dense_LM)
type(dense_LM)
# + [markdown] colab_type="text" id="qSCT1fI9LtvW"
# Next we will do the same thing with a sparse matrix, calling the output <b>sparse_LM</b>
# The inputs into the <b>laMat.Matrices.sparse</b> function are:
# <ul>
# <li>1st: Number of Rows</li>
# <li>2nd: Number of Columns</li>
# <li>3rd: Column Pointers (in a list)</li>
# <li>4th: Row Indices (in a list)</li>
# <li>5th: Values of the Matrix (in a list)</li>
# </ul> <br>
# <b>Note</b>: Remember that this is <b>column-major</b> so all arrays should be read as columns first (top down, left to right)
# + colab={} colab_type="code" id="6bwOstRmLtvW"
# Sparse local matrix (CSC) for:
# ([[1., 0., 3., 0.],
#   [3., 0., 0., 1.],
#   [0., 4., 0., 0.]])
# 1st: number of rows = 3
# 2nd: number of columns = 4
# 3rd: column pointers = [0, 2, 3, 4, 5]
# 4th: row indices = [0, 1, 2, 0, 1]
# 5th: values = [1.0, 3.0, 4.0, 3.0, 1.0]
sparse_LM = laMat.Matrices.sparse(3, 4, [0, 2, 3, 4, 5], [0, 1, 2, 0, 1], [1.0, 3.0, 4.0, 3.0, 1.0])
print(sparse_LM)
type(sparse_LM)
print(sparse_LM.toDense())
# + [markdown] colab_type="text" id="o0TC5c49Ltva"
# Make sure the output of <b>sparse_LM</b> matches the original matrix.
#
# Please refer the sample on the webpage for the understanding: https://stackoverflow.com/questions/44825193/how-to-create-a-sparse-cscmatrix-using-spark
# + [markdown] colab_type="text" id="auGeFkoFLtva"
# <a id = "rm"></a>
# ### <span style="color:#0b486b">2.2 Row Matrix</span>
#
# A RowMatrix is a row-oriented distributed matrix without meaningful row indices, backed by an RDD of its rows, where each row is a local vector. Since each row is represented by a local vector, the number of columns is limited by the integer range but it should be much smaller in practice.
#
# Import the following library:
# <ul>
# <li>RowMatrix from pyspark.mllib.linalg.distributed</li>
# </ul>
# + colab={} colab_type="code" id="OAlvgx4ULtvb"
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark import SparkContext
from pyspark.sql import SQLContext
# Reuse an already-running SparkContext if there is one (required on Colab/Databricks).
sc = SparkContext.getOrCreate()
# NOTE(review): SQLContext is deprecated since Spark 2.0 in favour of SparkSession;
# kept here because the tutorial API expects it.
sqlContext = SQLContext(sc)
# + [markdown] colab_type="text" id="3v58xbj4Ltvc"
# Now, let's create a RDD of vectors called <b>rowVecs</b>, using the SparkContext's parallelize function on the <b>Dense Matrix</b>.<br>
# The input into <b>sc.parallelize</b> is:
# <ul>
# <li>A list (The list we will be creating will be a list of the row values (each row is a list))</li>
# </ul> <br>
# <b>Note</b>: An RDD is a fault-tolerant collection of elements that can be operated on in parallel. <br>
# + colab={} colab_type="code" id="NU9gd8gELtvc"
# One list per matrix row, distributed as an RDD of local vectors.
dense_rows = [[1.0, 6.0, 3.0, 0.0],
              [3.0, 2.0, 5.0, 1.0],
              [9.0, 4.0, 0.0, 3.0]]
rowVecs = sc.parallelize(dense_rows)
# + [markdown] colab_type="text" id="GrEDkMImLtve"
# Wrap the RDD of rows in a RowMatrix.
# + colab={} colab_type="code" id="-cu9FWO0Ltvf"
rowMat = RowMatrix(rowVecs)
# + [markdown] colab_type="text" id="VlBA51xsLtvg"
# Retrieve the row count (m) and column count (n) of the RowMatrix.
# + colab={} colab_type="code" id="_KZ07eSfLtvh"
m, n = rowMat.numRows(), rowMat.numCols()
# + [markdown] colab_type="text" id="B08WFgLELtvi"
# Expected: 3 rows, 4 columns.
# + colab={} colab_type="code" id="Q3rjVcQELtvj"
print(m)
print(n)
# + [markdown] colab_type="text" id="AHAqheSfLtvk"
# <a id = "irm"></a>
# ### <span style="color:#0b486b">2.3 Indexed Row Matrix</span>
#
# An IndexedRowMatrix is similar to a RowMatrix but with meaningful row indices. It is backed by an RDD of indexed rows, so that each row is represented by its index (long-typed) and a local vector.
#
# Import the following Library:
# <ul>
# <li> IndexedRow, IndexedRowMatrix from pyspark.mllib.linalg.distributed</li>
# </ul>
# + colab={} colab_type="code" id="po_80C3MLtvl"
from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix
# + [markdown] colab_type="text" id="OuGXYDjOLtvm"
# Now, create a RDD called <b>indRows</b> by using the SparkContext's parallelize function on the <b>Dense Matrix</b>. <br>
# There are two different inputs you can use to create the RDD:
# <ul>
# <li>Method 1: A list containing multiple IndexedRow inputs</li>
# <ul>
# <li>Input into IndexedRow:</li>
# <ul>
# <li>1. Index for the given row (row number)</li>
# <li>2. row in the matrix for the given index</li>
# </ul>
# <li>ex. sc.parallelize([IndexedRow(0,[1, 2, 3]), ...])</li>
# </ul> <br>
# <li>Method 2: A list containing multiple tuples</li>
# <ul>
# <li>Values in the tuple:</li>
# <ul>
# <li>1. Index for the given row (row number) (type:long)</li>
# <li>2. List containing the values in the row for the given index (type:vector)</li>
# </ul>
# <li>ex. sc.parallelize([(0, [1, 2, 3]), ...])</li>
# </ul>
# </ul>
# + colab={} colab_type="code" id="gKQaa96OLtvn"
# Method 1: wrap each row in an IndexedRow(index, vector)
indRows = sc.parallelize([IndexedRow(0, [1.0, 6.0, 3.0, 0.0]),
                          IndexedRow(1, [3.0, 2.0, 5.0, 1.0]),
                          IndexedRow(2, [9.0, 4.0, 0.0, 3.0])])
# Method 2: plain (long, vector) tuples — this rebinds indRows, so Method 2's
# RDD is the one actually used below.
indexed_rows = [(0, [1.0, 6.0, 3.0, 0.0]),
                (1, [3.0, 2.0, 5.0, 1.0]),
                (2, [9.0, 4.0, 0.0, 3.0])]
indRows = sc.parallelize(indexed_rows)
# + [markdown] colab_type="text" id="9tw4Pgc9Ltvp"
# Build the IndexedRowMatrix from the indexed-row RDD.
# + colab={} colab_type="code" id="bWqDuijvLtvp"
indRowMat = IndexedRowMatrix(indRows)
# + [markdown] colab_type="text" id="aVfcOq4PLtvr"
# Retrieve the row count (m2) and column count (n2).
# + colab={} colab_type="code" id="2fv_pY00Ltvs"
m2, n2 = indRowMat.numRows(), indRowMat.numCols()
# + [markdown] colab_type="text" id="I2AAu2a0Ltvv"
# Expected: 3 rows, 4 columns.
# + colab={} colab_type="code" id="w-CyNZPlLtvv"
print(m2)
print(n2)
# + [markdown] colab_type="text" id="Uf19b7EDLtvw"
# <a id = "cm"></a>
# ### <span style="color:#0b486b">2.3 Coordinate Matrix</span>
#
#
# Now it's time to create a different type of matrix, whose use is appropriate when both dimensions of the matrix are very large and the data in the matrix is sparse. <br>
# <b>Note</b>: In this case, we will be using the small, sparse matrix above, just to get the idea of how to initialize a CoordinateMatrix
#
#
# A CoordinateMatrix is a distributed matrix backed by an RDD of its entries. Each entry is a tuple of (i: Long, j: Long, value: Double), where i is the row index, j is the column index, and value is the entry value. A CoordinateMatrix should be used only when both dimensions of the matrix are huge and the matrix is very sparse.
#
# Import the following libraries:
# <ul>
# <li>CoordinateMatrix, MatrixEntry from pyspark.mllib.linalg.distributed</li>
# </ul>
# + colab={} colab_type="code" id="SfF4syBhLtvx"
from pyspark.mllib.linalg.distributed import CoordinateMatrix, MatrixEntry
# + [markdown] colab_type="text" id="HuwY9ye_Ltvz"
# Now, create a RDD called <b>coordRows</b> by using the SparkContext's parallelize function on the <b>Sparse Matrix</b>. There are two different inputs you can use to create the RDD:
# <ul>
# <li>Method 1: A list containing multiple MatrixEntry inputs</li>
# <ul>
# <li>Input into MatrixEntry:</li>
# <ul>
# <li>1. Row index of the matrix (row number) (type: long)</li>
# <li>2. Column index of the matrix (column number) (type: long)</li>
# <li>3. Value at the (Row Index, Column Index) entry of the matrix (type: float)</li>
# </ul>
# <li>ex. sc.parallelize([MatrixEntry(0, 0, 1,), ...])</li>
# </ul> <br>
# <li>Method 2: A list containing multiple tuples</li>
# <ul>
# <li>Values in the tuple:</li>
# <ul>
# <li>1. Row index of the matrix (row number) (type: long)</li>
# <li>2. Column index of the matrix (column number) (type: long)</li>
# <li>3. Value at the (Row Index, Column Index) entry of the matrix (type: float)</li>
# </ul>
# <li>ex. sc.parallelize([(0, 0, 1), ...])</li>
# </ul>
# </ul>
# + colab={} colab_type="code" id="21qL_8MJLtv0"
# Entries of the Sparse Matrix defined earlier:
#   (0,0)=1.0, (0,2)=3.0, (1,0)=3.0, (1,3)=1.0, (2,1)=4.0
# Fixed: the original listed (2,2)=4.0 (and (1,1)=3.0 in Method 2), which does
# not match sparse_LM — its CSC arrays place 4.0 in column 1 (row 2) and 3.0
# in column 0 (row 1).
# Method 1. Using MatrixEntry class
coordRows = sc.parallelize([MatrixEntry(0, 0, 1.0),
                            MatrixEntry(0, 2, 3.0),
                            MatrixEntry(1, 0, 3.0),
                            MatrixEntry(1, 3, 1.0),
                            MatrixEntry(2, 1, 4.0)])
# Method 2. Using (long, long, float) tuples — rebinds coordRows; this RDD is
# the one used below.
coordRows = sc.parallelize([(0, 0, 1.0),
                            (0, 2, 3.0),
                            (1, 0, 3.0),
                            (1, 3, 1.0),
                            (2, 1, 4.0)])
# + [markdown] colab_type="text" id="6xsrK-6GLtv1"
# Now, create the <b>CoordinateMatrix</b> called <b>coordMat</b> by using the CoordinateMatrix function and passing in the <b>coordRows</b> RDD
# + colab={} colab_type="code" id="fSuWdfT2Ltv2"
coordMat = CoordinateMatrix(coordRows)
# + [markdown] colab_type="text" id="Ibs1rvwqLtv3"
# Retrieve the row count (m3) and column count (n3) of the CoordinateMatrix.
# + colab={} colab_type="code" id="VGsRnQqrLtv4"
m3, n3 = coordMat.numRows(), coordMat.numCols()
# + [markdown] colab_type="text" id="TTLKHZwkLtv7"
# Expected: 3 rows, 4 columns.
# + colab={} colab_type="code" id="FbzoGMYvLtv7"
print(m3)
print(n3)
# + [markdown] colab_type="text" id="MANbIKSkLtv8"
# The entries of coordMat, as an RDD of MatrixEntry objects.
# + colab={} colab_type="code" id="BUS5l9zwLtv9"
coordEnt = coordMat.entries
# + [markdown] colab_type="text" id="PkTmS5OzLtv-"
# Check the type of coordEnt.
# + colab={} colab_type="code" id="G3_OVVMGLtv_"
type(coordEnt)
# + [markdown] colab_type="text" id="uIKl0SNkLtwA"
# coordEnt is a (Pipelined)RDD; first() fetches its first element.
# + colab={} colab_type="code" id="xUEQBDTSLtwA"
coordEnt.first()
# + [markdown] colab_type="text" id="YpkCdrZpLtwC"
# <a id = "bm"></a>
# ### <span style="color:#0b486b">2.4 Block Matrix</span>
#
# A BlockMatrix is essentially a matrix consisting of elements which are partitions of the matrix that is being created.
#
# Import the following libraries:
# <ul>
# <li>Matrices from pyspark.mllib.linalg</li>
# <li>BlockMatrix from pyspark.mllib.linalg.distributed</li>
# </ul>
#
# A BlockMatrix is a distributed matrix backed by an RDD of MatrixBlocks, where a MatrixBlock is a tuple of ((Int, Int), Matrix), where the (Int, Int) is the index of the block, and Matrix is the sub-matrix at the given index with size rowsPerBlock x colsPerBlock. BlockMatrix supports methods such as add and multiply with another BlockMatrix. BlockMatrix also has a helper function validate which can be used to check whether the BlockMatrix is set up properly.
# + colab={} colab_type="code" id="rvYIns0lLtwD"
from pyspark.mllib.linalg import Matrices
from pyspark.mllib.linalg.distributed import BlockMatrix
# + [markdown] colab_type="text" id="TsLuI3LxLtwE"
# Now create a <b>RDD</b> of <b>sub-matrix blocks</b>. <br>
# This will be done using SparkContext's parallelize function. <br>
#
# The input into <b>sc.parallelize</b> requires a <b>list of tuples</b>. The tuples are the sub-matrices, which consist of two inputs:
# <ul>
# <li>1st: A tuple containing the row index and column index (row, column), denoting where the sub-matrix will start</li>
# <li>2nd: The sub-matrix, which will come from <b>Matrices.dense</b>. The sub-matrix requires 3 inputs:</li>
# <ul>
# <li>1st: Number of rows</li>
# <li>2nd: Number of columns</li>
# <li>3rd: A list containing the elements of the sub-matrix. These values are read into the sub-matrix column-major fashion</li>
# </ul>
# </ul> <br>
# (ex. ((51, 2), Matrices.dense(2, 2, [61.0, 43.0, 1.0, 74.0])) would be one row (one tuple)).
# + [markdown] colab_type="text" id="MdRSH_V2LtwF"
# The matrix we will be modelling is <b>Dense Matrix</b> from above. Create the following sub-matrices:
# <ul>
# <li>Row: 0, Column: 0, Values: 1.0, 3.0, 6.0, 2.0, with 2 Rows and 2 Columns </li>
# <li>Row: 2, Column: 0, Values: 9.0, 4.0, with 1 Row and 2 Columns</li>
# <li>Row: 0, Column: 2, Values: 3.0, 5.0, 0.0, 0.0, 1.0, 3.0, with 3 Rows and 2 Columns</li>
# </ul>
# + colab={} colab_type="code" id="dAw16uYtLtwG"
# Sub-matrix blocks of the Dense Matrix, keyed by their (row, column) offsets;
# each dense sub-matrix is filled column-major.
sub_blocks = [((0, 0), Matrices.dense(2, 2, [1.0, 3.0, 6.0, 2.0])),
              ((2, 0), Matrices.dense(1, 2, [9.0, 4.0])),
              ((0, 2), Matrices.dense(3, 2, [3.0, 5.0, 0.0, 0.0, 1.0, 3.0]))]
blocks = sc.parallelize(sub_blocks)
# + [markdown] colab_type="text" id="AdmDmQQlLtwH"
# Build the BlockMatrix: the RDD of sub-matrices with 1 row and 1 column per block.
# + colab={} colab_type="code" id="dwBp4ZXOLtwH"
blockMat = BlockMatrix(blocks, 1, 1)
# + [markdown] colab_type="text" id="OGOstI_PLtwL"
# Retrieve the row count (m4) and column count (n4).
# + colab={} colab_type="code" id="upwe2QUTLtwL"
m4, n4 = blockMat.numRows(), blockMat.numCols()
# + [markdown] colab_type="text" id="xzRDmNXcLtwN"
# Expected: 3 rows, 4 columns.
# + colab={} colab_type="code" id="jtvVPHPMLtwN"
print(m4)
print(n4)
# + [markdown] colab_type="text" id="010_x_4HLtwO"
# Convert blockMat to a local matrix to verify its contents.
# + colab={} colab_type="code" id="7T-s3p7rLtwP"
locBMat = blockMat.toLocalMatrix()
# + [markdown] colab_type="text" id="YXlO4_9wLtwQ"
# locBMat should match the original Dense Matrix and be a DenseMatrix.
# + colab={} colab_type="code" id="kXH07bVALtwS"
print(locBMat)
print(type(locBMat))
# + [markdown] colab_type="text" id="o_izbJT1wL6R"
# **Conclusion**
#
# Distributed matrix
#
# A distributed matrix has long-typed row and column indices and double-typed values, stored distributively in one or more RDDs. It is very important to choose the right format to store large and distributed matrices. Converting a distributed matrix to a different format may require a global shuffle, which is quite expensive. Four types of distributed matrices have been implemented so far.
#
# The basic type is called **RowMatrix**. A RowMatrix is a row-oriented distributed matrix without meaningful row indices, e.g., a collection of feature vectors. It is backed by an RDD of its rows, where each row is a local vector. We assume that the number of columns is not huge for a RowMatrix so that a single local vector can be reasonably communicated to the driver and can also be stored / operated on using a single node. An **IndexedRowMatrix** is similar to a RowMatrix but with row indices, which can be used for identifying rows and executing joins. A **CoordinateMatrix** is a distributed matrix stored in coordinate list (COO) format, backed by an RDD of its entries. A **BlockMatrix** is a distributed matrix backed by an RDD of MatrixBlock which is a tuple of (Int, Int, Matrix).
#
# **Note**
#
# The underlying RDDs of a distributed matrix must be deterministic, because we cache the matrix size. In general the use of non-deterministic RDDs can lead to errors.
# + [markdown] colab_type="text" id="uEM9zWdXLtwT"
# ---
# ## <span style="color:#0b486b">3. Matrix Conversions</span>
#
#
# In this bonus section, we will talk about a relationship between the different [types of matrices](https://www.emathzone.com/tutorials/algebra/types-of-matrices.html). You can convert between these matrices that we discussed with the following functions. <br>
# <ul>
# <li>.toRowMatrix() converts the matrix to a RowMatrix</li>
# <li>.toIndexedRowMatrix() converts the matrix to an IndexedRowMatrix</li>
# <li>.toCoordinateMatrix() converts the matrix to a CoordinateMatrix</li>
# <li>.toBlockMatrix() converts the matrix to a BlockMatrix</li>
# </ul>
# + [markdown] colab_type="text" id="BW_z4pUYLtwT"
# <a id = "irmc"></a>
# ### <span style="color:#0b486b">3.1 Indexed Row Matrix Conversions</span>
#
# The following conversions are supported for an IndexedRowMatrix:
# <ul>
# <li>IndexedRowMatrix -> RowMatrix</li>
# <li>IndexedRowMatrix -> CoordinateMatrix</li>
# <li>IndexedRowMatrix -> BlockMatrix</li>
# </ul>
# + colab={} colab_type="code" id="6fA6spiGLtwV"
# IndexedRowMatrix supports conversion to the other three distributed formats.
rMat = indRowMat.toRowMatrix()          # -> RowMatrix
cMat = indRowMat.toCoordinateMatrix()   # -> CoordinateMatrix
bMat = indRowMat.toBlockMatrix()        # -> BlockMatrix
for converted in (rMat, cMat, bMat):
    print(type(converted))
# + [markdown] colab_type="text" id="nAiWbc4yLtwW"
# <a id = "cmc"></a>
# ### <span style="color:#0b486b">3.2 Coordinate Matrix Conversions</span>
#
# The following conversions are supported for an CoordinateMatrix:
# <ul>
# <li>CoordinateMatrix -> RowMatrix</li>
# <li>CoordinateMatrix -> IndexedRowMatrix</li>
# <li>CoordinateMatrix -> BlockMatrix</li>
# </ul>
# + colab={} colab_type="code" id="SmfKNn9uLtwX"
# CoordinateMatrix supports conversion to the other three distributed formats.
rMat2 = coordMat.toRowMatrix()          # -> RowMatrix
iRMat = coordMat.toIndexedRowMatrix()   # -> IndexedRowMatrix
bMat2 = coordMat.toBlockMatrix()        # -> BlockMatrix
for converted in (rMat2, iRMat, bMat2):
    print(type(converted))
# + [markdown] colab_type="text" id="m81USk1uLtwY"
# <a id = "bmc"></a>
# ### <span style="color:#0b486b">3.3 Block Matrix Conversions</span>
#
#
# The following conversions are supported for an BlockMatrix:
# <ul>
# <li>BlockMatrix -> LocalMatrix (Can display the Matrix)</li>
# <li>BlockMatrix -> IndexedRowMatrix</li>
# <li>BlockMatrix -> CoordinateMatrix</li>
# </ul>
# + colab={} colab_type="code" id="UomLrvNdLtwY"
# BlockMatrix supports conversion to a local matrix and two distributed formats.
lMat = blockMat.toLocalMatrix()          # -> local (Dense) matrix
iRMat2 = blockMat.toIndexedRowMatrix()   # -> IndexedRowMatrix
cMat2 = blockMat.toCoordinateMatrix()    # -> CoordinateMatrix
for converted in (lMat, iRMat2, cMat2):
    print(type(converted))
| Jupyter/M09-Optional/SIT742P07B-MLlib-DataType.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:hpaic]
# language: python
# name: conda-env-hpaic-py
# ---
# Root directory of the HPA image-classification project data — TODO confirm per machine.
DATADIR = '/s/project/junTemp/HPAIC/'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from skimage import io, transform
import PIL
# train.csv: one row per image Id with a space-separated 'Target' label list.
image_df = pd.read_csv(DATADIR + "data/png/train.csv")
# Parse 'Target' (e.g. "0 5 25") into a list of ints per image (multi-label task).
image_df['target_list'] = image_df['Target'].map(lambda x: [int(a) for a in x.split(' ')])
image_df.head()
import itertools
# Flatten every per-image label list into one array to inspect class frequencies.
all_labels = np.array(list(itertools.chain(*image_df.target_list.values)))
dict(zip(*np.unique(all_labels, return_counts=True)))
np.unique(all_labels, return_counts=True)[1]
# Human-readable names for the 28 HPA protein-localization classes, keyed by label id.
name_label_dict = dict(enumerate([
    "Nucleoplasm",
    "Nuclear membrane",
    "Nucleoli",
    "Nucleoli fibrillar center",
    "Nuclear speckles",
    "Nuclear bodies",
    "Endoplasmic reticulum",
    "Golgi apparatus",
    "Peroxisomes",
    "Endosomes",
    "Lysosomes",
    "Intermediate filaments",
    "Actin filaments",
    "Focal adhesion sites",
    "Microtubules",
    "Microtubule ends",
    "Cytokinetic bridge",
    "Mitotic spindle",
    "Microtubule organizing center",
    "Centrosome",
    "Lipid droplets",
    "Plasma membrane",
    "Cell junctions",
    "Mitochondria",
    "Aggresome",
    "Cytosol",
    "Cytoplasmic bodies",
    "Rods & rings",
]))
# ## Load a few images
# There are 28 classes, lets select one image for each class
# Per-channel filename suffixes; each image Id has four grayscale PNGs, one per stain.
CHANELS = ['_yellow', '_red', '_green', '_blue']
def readimg(imgid,
            datadir=DATADIR + 'data/png/train/',
            suffix='.png',
            rgb=True,
            stack=True):
    """Load the four stain channels for `imgid` and combine them into one array.

    Args:
        imgid: image identifier (filename stem without the channel suffix).
        datadir: directory holding the per-channel PNG files.
        suffix: image file extension.
        rgb: if True, fold half of the yellow channel into red and green and
            return only the red/green/blue channels stacked as H x W x 3.
        stack: used only when rgb is False — if True return all four channels
            stacked as H x W x 4, otherwise return the list of channel arrays.
    """
    imgs = [io.imread(datadir + imgid + cl + suffix) for cl in CHANELS]
    if rgb:
        # Mix half of the yellow channel into red and green in place.
        # NOTE(review): uint8 in-place addition wraps on overflow — bright
        # pixels near 255 may wrap around instead of saturating; confirm
        # this is intended.
        imgs[1] += (imgs[0]/2).astype(np.uint8)
        imgs[2] += (imgs[0]/2).astype(np.uint8)
        img = np.stack(imgs[1:], -1)
        return img
    else:
        if stack:
            img = np.stack(imgs, -1)
            return img
        return imgs
# Sample image kept around for later visual checks of the transform pipelines.
test = readimg(image_df.Id[3])
import math
n = 4
# 4 rows x ceil(n/4) columns of subplots; with n=4 this is a 4x1 grid, so `ax` is 1-D.
fig, ax = plt.subplots(4, math.ceil(n/4), figsize=(10*math.ceil(n/4),10))
for i in range(4):
    ax[i].imshow(readimg(image_df.Id[i]))
    # Hide tick labels and tick marks — only the images matter here.
    ax[i].set_xticklabels([])
    ax[i].set_yticklabels([])
    ax[i].tick_params(left=False, bottom=False)
plt.show()
# ## Load datasets
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import models, transforms
class HpaDataset(Dataset):
    '''Multi-label dataset over the HPA training csv.

    Args:
        dt: path to the csv file describing the data.
        transform (callable, optional): Optional transform to be applied
            on each loaded image.
    '''
    def __init__(self, dt, transform=None):
        table = pd.read_csv(dt)
        # Parse the space-separated 'Target' column into per-image label lists.
        table['target_list'] = table['Target'].map(
            lambda s: [int(tok) for tok in s.split(' ')])
        self.ids = table.Id
        self.targets = table.target_list
        self.transform = transform
        self.length = table.shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # 28-dim multi-hot float vector, suitable for BCE-style losses.
        labels = np.zeros(28, dtype=np.float32)
        labels[self.targets[idx]] = 1
        img = readimg(self.ids[idx])
        if self.transform:
            img = self.transform(img)
        return img, labels
# +
class Rescale(object):
    """Rescale an image to a given size.

    Args:
        output_size (tuple or int): Desired output size. A tuple is used
            as-is; an int is matched to the smaller image edge, keeping
            the aspect ratio.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, image):
        h, w = image.shape[:2]
        if isinstance(self.output_size, tuple):
            new_h, new_w = self.output_size
        elif h > w:
            # Portrait: match the width, scale the height proportionally.
            new_h, new_w = self.output_size * h / w, self.output_size
        else:
            # Landscape/square: match the height, scale the width proportionally.
            new_h, new_w = self.output_size, self.output_size * w / h
        return transform.resize(image, (int(new_h), int(new_w)),
                                mode='reflect', anti_aliasing=True)
class ToTensor(object):
    """Convert an H x W x C ndarray image to a C x H x W float tensor."""
    def __call__(self, image):
        # torch expects channels first: H x W x C -> C x H x W
        chw = np.transpose(image, (2, 0, 1))
        return torch.from_numpy(chw).float()
# +
# import skimage
# from skimage.transform import resize
# Input resolution expected by the network (299 for Inception-family models).
input_size = 299 # resnet, 299 for inception
# (Commented-out torchvision reference pipelines, kept for comparison.)
# data_transforms = {
#     'train': transforms.Compose([
#         transforms.RandomResizedCrop(input_size),
#         transforms.RandomHorizontalFlip(),
#         transforms.ToTensor(),
#         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
#     ]),
#     'val': transforms.Compose([
#         transforms.Resize(input_size),
#         transforms.CenterCrop(input_size),
#         transforms.ToTensor(),
#         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
#     ]),
# }
# Trial pipeline: rescale to input_size, then HWC ndarray -> CHW float tensor.
trail_transform = transforms.Compose([Rescale(input_size),
                                      ToTensor()])
# -
hpadata = HpaDataset(DATADIR + 'data/png/train.csv', transform=trail_transform)
# Compute statistics for the images
# +
# (One-off snippet that estimated per-channel mean/std on a training subset;
# the numbers feed the Normalize calls further below.)
# train_size = int(0.08 * len(hpadata))
# val_size = len(hpadata) - train_size
# train_dataset, val_dataset = random_split(hpadata, [train_size, val_size])
# dl_all_train = DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=True, num_workers=60)
# dl_all_train = iter(dl_all_train)
# test = next(dl_all_train)
# test[0].mean(0).mean(1).mean(1)
# test[0].permute(1,0,2,3).reshape(3, -1).std(1)
# +
from torch.utils.data import DataLoader, random_split
from PIL import Image
from torchvision import transforms
from hpa_src.data.transforms import ToPIL
from torch.nn import BCEWithLogitsLoss
from hpa_src.models.loss import FocalLoss
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
# +
# Augmented training pipeline built on PIL ops; ToPIL converts the ndarray first.
train_transform = transforms.Compose([
    ToPIL(),
    #transforms.Resize(input_size),
    transforms.RandomResizedCrop(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(30),
    transforms.ToTensor(),
    # transforms.Normalize((0.1149, 0.0922, 0.0553),
    #                      (0.1694, 0.1381, 0.1551))
])
# Deterministic pipeline for validation: resize only, no augmentation.
val_transform = transforms.Compose([
    ToPIL(),
    transforms.Resize(input_size),
    transforms.ToTensor(),
    # transforms.Normalize((0.1149, 0.0922, 0.0553),
    #                      (0.1694, 0.1381, 0.1551))
])
# -
# Visual sanity check of both pipelines on the sample image loaded earlier (`test`).
plt.imshow(test)
plt.show()
plt.imshow(train_transform(test).permute((1,2,0)))
plt.show()
plt.imshow(val_transform(test).permute((1,2,0)))
plt.show()
# ## Split train validation index
# NOTE(review): this rebinds train_transform to a non-augmenting pipeline
# (Rescale + ToTensor + Normalize) — the PIL-based augmented pipeline defined
# above is discarded from here on; confirm that is intended.
train_transform = transforms.Compose([Rescale(input_size),
                                      ToTensor(),
                                      transforms.Normalize((0.1149, 0.0922, 0.0553),
                                                           (0.1694, 0.1381, 0.1551))])
hpadata = HpaDataset(DATADIR + 'data/png/train.csv', transform=train_transform)
# 80/20 random train/validation split.
train_size = int(0.8 * len(hpadata))
val_size = len(hpadata) - train_size
train_dataset, val_dataset = random_split(hpadata, [train_size, val_size])
dl_train = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
dl_val = DataLoader(val_dataset, batch_size=32, shuffle=True, num_workers=4)
# Globals consumed by train_model below.
dataloaders = {'train': dl_train, 'val': dl_val}
dataset_sizes = {'train': train_size, 'val': val_size}
# ## Training
from torchvision import models, transforms
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.nn.modules.loss import _WeightedLoss
# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# pretrained = models.inception_v3(pretrained=True, transform_input=False)
# ImageNet-pretrained Inception-ResNet-v2 backbone from the third-party
# `pretrainedmodels` package.
import pretrainedmodels
pretrained = pretrainedmodels.__dict__['inceptionresnetv2'](num_classes=1000, pretrained='imagenet')
# +
# pretrained.AuxLogits = InceptionAux(768, 28)
# -
# Replace the 1000-way ImageNet head with a 28-way head for the HPA classes.
pretrained.last_linear = nn.Linear(pretrained.last_linear.in_features, 28)
# NOTE(review): assumes two GPUs (device_ids=[0, 1]) — confirm on the target machine.
pretrained = nn.DataParallel(pretrained, device_ids=[0, 1])
pretrained = pretrained.to(device)
class CrossEntropyLossOneHot(_WeightedLoss):
    '''Cross entropy that accepts one-hot (or soft) target vectors.

    log_softmax(x) yields log(q) for the predicted probability q given input
    logits x; the loss is the batch mean of sum(-target * log(q), dim=1).
    '''
    def __init__(self, weight=None, size_average=True, ignore_index=-100,
                 reduce=None, reduction='elementwise_mean'):
        super(CrossEntropyLossOneHot, self).__init__(weight, size_average, reduce, reduction)
        # NOTE(review): ignore_index is stored but never used in forward.
        self.ignore_index = ignore_index

    def forward(self, input, target):
        log_q = nn.functional.log_softmax(input, dim=1)
        per_sample = torch.sum(-target * log_q, dim=1)
        return torch.mean(per_sample)
def preds2label(preds, threshold=0, onehot=True):
    '''Convert raw per-class predictions to multi-label output.

    Args:
        preds: 2-D array of per-class scores (default logits).
        threshold: decision threshold — 0 for logits, 0.5 for probabilities.
        onehot: if True return a (n_samples, n_classes) one-hot array;
            otherwise return a generator of per-sample label-index arrays.

    Returns:
        np.ndarray when onehot is True, else a generator of 1-D index arrays.

    Fix: the original body mixed `return` and `yield` in a single function,
    which made *every* call return a generator — so the onehot=True path
    never actually returned the array. The two paths are now separated.
    '''
    if onehot:
        label = np.zeros(preds.shape)
        for i in range(preds.shape[0]):
            lb = np.argwhere(preds[i, :] > threshold)
            label[i, lb] = 1
        return label

    def _iter_labels():
        # Lazy per-sample index arrays, matching the original generator output.
        for i in range(preds.shape[0]):
            lb = np.argwhere(preds[i, :] > threshold)
            yield np.array(lb).flatten()
    return _iter_labels()
# +
criterion = CrossEntropyLossOneHot()
# NOTE(review): a softmax cross entropy over one-hot targets is used for what
# is a multi-label task; BCEWithLogitsLoss/FocalLoss are imported above but
# unused — confirm the loss choice.
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(pretrained.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# +
import time
import copy
from sklearn.metrics import f1_score
def train_model(model, criterion, optimizer, scheduler, num_epochs=3):
    """Train `model`, keeping the weights with the best validation micro-F1.

    Relies on the module-level `dataloaders`, `dataset_sizes` and `device`.

    Args:
        model: network to train; may return a (output, aux_output) tuple
            (Inception-style auxiliary head).
        criterion: loss taking (logits, one-hot targets).
        optimizer: optimizer over the model's parameters.
        scheduler: LR scheduler, stepped once per training epoch.
        num_epochs: number of epochs to run.

    Returns:
        The model with the best-validation-F1 weights loaded.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_f1 = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                # NOTE(review): scheduler.step() before the epoch follows the
                # pre-1.1 PyTorch convention; newer releases expect it after
                # optimizer.step() — confirm against the installed version.
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_f1 = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    if type(outputs) == tuple:
                        # Auxiliary head present: combine main and aux losses.
                        output, aux_output = outputs
                        _, preds = torch.max(output, 1)
                        loss1 = criterion(output, labels)
                        loss2 = criterion(aux_output, labels)
                        loss = loss1 + 0.3*loss2
                    else:
                        # NOTE(review): preds here comes from preds2label and is
                        # passed to f1_score together with device tensors —
                        # confirm both are CPU/array-like at this point.
                        preds = preds2label(outputs)
                        loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                # NOTE(review): F1 is accumulated once per *batch* but divided
                # by the *dataset* size below, so epoch_f1 is not a true mean
                # F1 — it is off by roughly a factor of the batch size.
                running_f1 += f1_score(labels, preds, average='micro')
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_f1 = running_f1 / dataset_sizes[phase]
            #epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_f1))
            # deep copy the model
            if phase == 'val' and epoch_f1 > best_f1:
                best_f1 = epoch_f1
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val F1: {:4f}'.format(best_f1))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# -
best_model = train_model(pretrained, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20)
# Persist the best-validation weights (filename kept from an earlier 3-epoch run).
torch.save(best_model.state_dict(), 'models/torch_3epoch')
# ## Prediction
# NOTE(review): `state_dict` is loaded here but unused — the weights are
# re-loaded directly on the next line.
state_dict = torch.load("models/torch_3epoch")
pretrained.load_state_dict(torch.load("models/torch_3epoch"))
tst = pd.read_csv(DATADIR + "data/png/sample_submission.csv")
tst.head()
class TestDataset(Dataset):
    """Unlabelled dataset over the sample-submission csv (test images only)."""
    def __init__(self, dt, transform=None):
        table = pd.read_csv(dt)
        self.ids = table.Id
        self.transform = transform
        self.length = table.shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Test images live in a separate directory from the training set.
        img = readimg(self.ids[idx], datadir=DATADIR+'data/png/test/')
        if self.transform:
            img = self.transform(img)
        return img
test = TestDataset(DATADIR + 'data/png/sample_submission.csv', transform=train_transform)
test_dl = DataLoader(test, batch_size=32, num_workers=2)
# NOTE(review): the model is not explicitly set to eval() before inference;
# train_model's last phase was 'val' so it should already be in eval mode — confirm.
with torch.no_grad():
    prediction = [best_model(img) for img in test_dl]
    prediction = torch.cat(prediction)
#prediction = preds2label(prediction)
# Mean logit per class — rough check of the prediction scale.
prediction.sum()/ prediction.shape[0]
# Logit corresponding to probability 0.8 (inverse sigmoid), for threshold tuning.
np.log(0.8/0.2)
# Per-image label-index lists at a probability-0.9 logit threshold.
preds = preds2label(prediction, onehot=False, threshold=np.log(0.9/0.1))
def array2str(arr):
    """Yield each row of `arr` as a space-separated string of its labels."""
    for row in arr:
        yield ' '.join(str(label) for label in row)
# Join each image's label indices into the space-separated submission format.
preds = list(array2str(preds))
tst.Predicted = preds
tst.head()
tst.to_csv("Submission.csv", index=False)
import tensorflow as tf
| notebooks/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp core
# -
# # Core XLA extensions
#hide
#colab
# Mount Google Drive so the notebook can read/write the project folder.
from google.colab import drive
drive.mount('/content/drive')
#hide_input
#colab
import os
# Fail fast if the Colab runtime was not started with a TPU accelerator.
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
#hide
#colab
# !curl -s https://course.fast.ai/setup/colab | bash
# ## Install fastai2
#hide_output
#colab
# !pip install fastai2 > /dev/null
# ## Setup torch XLA
#
#hide_output
#colab
# torch_xla wheel version to install; "nightly" tracks master.
VERSION = "20200707" #@param ["1.5" , "20200325","20200707", "nightly"]
# !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
# !python pytorch-xla-env-setup.py --version $VERSION
#hide
# !pip freeze | grep torch
# !pip freeze | grep fastai2
#hide
#colab
# %cd /content/drive/My\ Drive/fastai_xla_extensions
# ## Check if XLA is available
#hide
# Default used when the exported module runs outside this notebook's setup cell.
XLA_AVAILABLE = False
#export
#colab
# Assume XLA is present, then fall back if torch_xla cannot be imported.
XLA_AVAILABLE = True
try:
    import torch_xla.core.xla_model as xm
except ImportError as e:
    XLA_AVAILABLE = False
    # NOTE(review): `warnings` is imported but the warn call below is commented
    # out, and the bound exception `e` is unused.
    import warnings
    # warnings.warn('fastai_xla_extensions requires Pytorch-XLA, will revert to default',
    #               RuntimeWarning)
# ## Fake XLA functionality if XLA not available
# if TPU not available, fake xm to call opt.step anyway,
# and fake xla_device to return gpu if available, else return cpu
# to ensure compatible behavior in using fastai_xla_extensions as
# normal fastai behavior if TPU not available
#export
if not XLA_AVAILABLE:
    # No TPU/XLA: provide a stand-in `xm` namespace so the rest of the module
    # can call xm.optimizer_step / xm.xla_device unconditionally.
    from types import SimpleNamespace
    import torch.cuda
    def fake_opt_step(opt,barrier=False):
        """Drop-in for `xm.optimizer_step`: just step the optimizer (barrier ignored)."""
        opt.step()
    def fake_device(n=None, devkind=None):
        """Drop-in for `xm.xla_device`: current GPU if available, else CPU."""
        gpu_available = torch.cuda.is_available()
        # torch.device accepts a bare ordinal, which implies a CUDA device.
        return torch.device(torch.cuda.current_device()) if gpu_available else torch.device('cpu')
    xm = SimpleNamespace(
        optimizer_step = fake_opt_step,
        xla_device = fake_device
    )
# ## Add TPU info to defaults if XLA available
#export
if XLA_AVAILABLE:
    from fastcore.foundation import defaults
    # Register the TPU device on fastai defaults so downstream code can find it.
    defaults.tpu_device = xm.xla_device(devkind='TPU')
    # Idiomatic None check: identity (`is not None`) rather than `!= None`,
    # which relied on torch.device.__eq__ handling a non-device operand.
    defaults.tpu_available = defaults.tpu_device is not None
# ## Monkey patch `default_device` and `to_device` to use TPU if tpu is available
# Replace `fastai2.torch_core.default_device` and `fastai2.torch_core.to_device`
# with ones that will return the TPU device assuming a TPU is available
# (and CPU is not explicitly requested)
#export
if XLA_AVAILABLE and defaults.tpu_available:
    import fastai2.torch_core
    from fastai2.torch_core import apply
    from torch import Tensor
    def default_device(use_cuda=-1):
        "Return `TPU` as default device"
        # `use_cuda` is accepted (to match the fastai2 signature) but ignored.
        return defaults.tpu_device
    def to_device(b, device=None):
        "Recursively put `b` on `device`."
        if device is None: device=default_device()
        # print(f'setting device to {device}')
        # Tensors move via .to(); objects exposing .to_device() delegate;
        # anything else is returned unchanged.
        def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o.to_device(device) if hasattr(o, "to_device") else o
        return apply(_inner, b)
    # Monkey-patch fastai2 so all library code picks the TPU by default.
    fastai2.torch_core.default_device = default_device
    fastai2.torch_core.to_device = to_device
# ## XLA Optim Proxy
# `XLAOptimProxy` is a class which has overridden the `step` method to call the Pytorch-XLA function `xm.optimizer_step` which synchronizes the XLA graph. All other calls to `XLAOptimProxy` just forward it to the internal `self.opt` instance.
#export
class XLAOptimProxy:
    """Proxy optimizer that overrides `opt.step` with the Pytorch XLA sync
    method `xm.optimizer_step`; every other attribute access is forwarded to
    the wrapped optimizer.

    Parameters:
        opt: the optimizer to wrap.
        barrier: passed to `xm.optimizer_step` to force an XLA graph sync.
    """
    def __init__(self,opt, barrier=True):
        self.opt = opt
        self._barrier = barrier

    def xla_step(self):
        # sync on gradient update
        xm.optimizer_step(self.opt, barrier=self._barrier)

    def __getattr__(self,name):
        # __getattr__ is only invoked when normal lookup fails, so it never
        # sees 'opt', '_barrier', or the 'barrier' property.  The original
        # special-case for ('barrier','_barrier') was dead code and, if ever
        # reached, `getattr(self, name)` would recurse into __getattr__
        # forever (RecursionError instead of AttributeError) -- removed.
        if name == 'step': # override proxying for step
            return self.xla_step
        # proxy everything else to the wrapped optimizer
        return getattr(self.opt,name)

    @property
    def barrier(self): return self._barrier
    @barrier.setter
    def barrier(self,v): self._barrier = v
# ## XLA Opt Callback
# This callback replaces the learner's `opt` with an instance of `XLAOptimProxy` that proxies the original `opt` during the beginning of the `fit` method and restores the original `opt` after the `fit`.
# +
#export
from fastai2.callback.core import Callback
class XLAOptCallback(Callback):
    'Callback to replace `opt.step` with `xm.optimizer_step(opt)` as required to run on TPU'

    def __init__(self, barrier=True):
        self._barrier = barrier

    def before_fit(self):
        'replace opt with proxy which calls `xm.optimizer_step` instead of `opt.step`'
        current = self.learn.opt
        if current is not None and not isinstance(current, XLAOptimProxy):
            self.learn.opt = XLAOptimProxy(current, barrier=self._barrier)

    def after_fit(self):
        'restore original opt '
        if isinstance(self.learn.opt, XLAOptimProxy):
            self.learn.opt = self.learn.opt.opt

    @property
    def barrier(self):
        return self._barrier

    @barrier.setter
    def barrier(self, v):
        self._barrier = v
# -
# ## Add XLOptCallback to list of default callbacks if tpu is available
#export
if XLA_AVAILABLE and defaults.tpu_available:
    # Register XLAOptCallback as a default callback exactly once.
    if hasattr(defaults,'callbacks'):
        if XLAOptCallback not in defaults.callbacks:
            defaults.callbacks.append(XLAOptCallback)
    else:
        defaults.callbacks = [XLAOptCallback]
# ## Example: Create an MNIST classifier
# This is an example of the fastai_xla_extensions library
# in action.
#
# First, we import fastai libraries.
#hide
#colab
# %cd /content
from fastai2.vision.all import *
# Load data
# Download (and cache) the tiny MNIST sample dataset.
path = untar_data(URLs.MNIST_TINY)
Path.BASE_PATH = path
# Create datablock
#
# _(note: `batch_tfms` is empty -- this is still being debugged for slow performance)_
datablock = DataBlock(
    blocks=(ImageBlock(cls=PILImageBW),CategoryBlock),  # grayscale image -> class label
    get_items=get_image_files,
    get_y=parent_label,            # label = name of the parent folder
    splitter=GrandparentSplitter(),  # train/valid from grandparent folder names
    item_tfms=Resize(28),
    batch_tfms=[]
)
# Set dataloader to load the batches to the tpu
dls = datablock.dataloaders(path)
#colab
# Inspect which device batches land on (expected: the TPU device).
dls.device
dls.show_batch()
#colab
# Pull one batch to confirm device placement and tensor shape.
xb,yb = dls.one_batch()
(xb.device, xb.shape)
# Create the Learner
learner = cnn_learner(dls, resnet18, metrics=accuracy)
# The learner.opt should be an instance of fastai `Optimizer`
learner.opt
# The `learner` object should have an `xla_opt` attribute which confirms that `XLAOptCallback` has been added to the list of callbacks for this learner.
#colab
learner.xla_opt
learner.summary()
# Run fine_tune to train the model
#colab
learner.fine_tune(1)
# Run LR Finder to find best values for discriminative learning
#colab
learner.unfreeze()
learner.lr_find()
#colab
# Discriminative learning rates: smallest for early layers, larger for the head.
learner.fit_one_cycle(4, lr_max=slice(1e-6,1e-4))
# Valid loss still trending down -- looks like more epochs needed to improve model.
#colab
learner.recorder.plot_loss()
# Plot moms and lr across batches/epochs
#colab
learner.recorder.plot_sched()
# Get Classification Interpretation for more details on model performance
#colab
interp = ClassificationInterpretation.from_learner(learner)
# Plot confusion matrix
#colab
interp.plot_confusion_matrix()
# Samples where model was most confused
#colab
interp.plot_top_losses(12)
# Gradient Accum callback (which calls CancelBatchException) should still work.
#
# An alternative design for the XLA Opt Callback which raises the CancelBatchException in the `after_backward` method (after executing `xm.optimizer_step` and `opt.zero_grad`) would interfere with the Gradient Accum callback (which raises `CancelBatchException` in the `after_backward` method to [skip the gradient updates](https://github.com/fastai/fastai2/blob/master/fastai2/callback/training.py#L22) in order to accumulate the gradients).
#
# The current design (add/remove `XLAOptimProxy` during `before_fit` and `after_fit` callback lifecycle methods) is less disruptive and more compatible with other callbacks.
from fastai2.callback.training import *
# Sanity check: gradient accumulation coexists with the XLA opt proxy.
learner.fit_one_cycle(4,cbs=[GradientAccumulation(n_acc=2),])
| archive_nbs/00_core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import json
# Sample duckling output: "number" entities extracted from Vietnamese text.
matches = [{"body":"không","start":5,"value":{"value":0,"type":"value"},"end":10,"dim":"number","latent":'false'},{"body":"100","start":20,"value":{"value":100,"type":"value"},"end":23,"dim":"number","latent":'false'}, {"body":"không","start":7,"value":{"value":0,"type":"value"},"end":121,"dim":"number","latent":'false'}]
matches
# Drop false-positive number entities whose surface form is the word "không"
# (Vietnamese "no"/negation), which duckling misreads as the number 0.
matches = [i for i in matches if i['body'] != 'không']
matches
| jupyter_dev/remove_wrong_entities_duckling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/reallygooday/60daysofudacity/blob/master/IMPORT_ERROR_ENCRYPTED_TRAINING_WITH_PYTORCH_%26_PYSYFT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uQElRFluKslF" colab_type="text"
# https://colab.research.google.com/drive/14O_RZL8wZnLjAtZqHjzSQNKBzBq8KDds#scrollTo=uQElRFluKslF
# + [markdown] id="5cS52faqHzJ5" colab_type="text"
# https://blog.openmined.org/encrypted-training-on-mnist/
# + id="Gf3SchzMHqQE" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
# + id="L8erbxBAID-f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b312dc61-02e5-4cfb-a657-2000e68cc6ff"
class Arguments():
    """Hyper-parameter container for the encrypted-training demo."""
    def __init__(self):
        # Fixed experiment settings; tweak here rather than at call sites.
        self.__dict__.update(
            batch_size=64,
            test_batch_size=64,
            epochs=20,
            lr=0.02,
            seed=1,
            log_interval=1,           # Log info at each batch
            precision_fractional=3,   # fractional digits kept by fix_precision
        )
args = Arguments()
# Seed PyTorch's RNG so runs are reproducible.
torch.manual_seed(args.seed)
# + id="nN6ERd_SIQk4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e7f97cf6-de04-47a7-b57d-7f0a4ccc3478"
# !pip install syft
# + id="Jouyct-kLOBe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="45f432d8-4d13-4ee5-d9d6-66a0349acc40"
# !pip install future
# + id="G5mLI-IXIEXT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 473} outputId="bb07836b-0cb4-4e29-9078-bf86ed481a0d"
import syft as sy # import the Pysyft library
# hook PyTorch to add extra functionalities like Federated and Encrypted Learning
hook = sy.TorchHook(torch)
# simulation functions
from future import connect_to_workers, connect_to_crypto_provider
workers = connect_to_workers(n_workers=2)
crypto_provider = connect_to_crypto_provider()
# + id="FuvVxj29IO3q" colab_type="code" colab={}
class Net(nn.Module):
    """Small MLP classifier for 28x28 MNIST images: 784 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten whatever spatial layout comes in to one 784-vector per sample.
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# + id="Y3sbFTyyJHH0" colab_type="code" colab={}
def train(args, model, private_train_loader, optimizer, epoch):
    """Run one epoch of encrypted (secret-shared) training.

    The data is secret-shared, so `F.nll_loss` is unavailable; an MSE loss is
    computed directly on shares instead.
    NOTE(review): MSE against `target` implies one-hot targets -- confirm with
    the data-preparation code.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(private_train_loader): # <-- now it is a private dataset
        start_time = time.time()
        optimizer.zero_grad()
        output = model(data)
        # loss = F.nll_loss(output, target) <-- not possible here
        batch_size = output.shape[0]
        # .refresh() is a PySyft fixed-precision op that re-randomises shares.
        loss = ((output - target)**2).sum().refresh()/batch_size
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # Decrypt the loss for logging only.
            loss = loss.get().float_precision()
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime: {:.3f}s'.format(
                epoch, batch_idx * args.batch_size, len(private_train_loader) * args.batch_size,
                100. * batch_idx / len(private_train_loader), loss.item(), time.time() - start_time))
# + id="k4R4u5ysJMey" colab_type="code" colab={}
def test(args, model, private_test_loader):
    """Evaluate on the encrypted test set and print decrypted accuracy."""
    model.eval()
    test_loss = 0  # unused -- kept from the original tutorial
    correct = 0
    with torch.no_grad():
        for data, target in private_test_loader:
            start_time = time.time()  # timing hook (currently unreported)
            output = model(data)
            pred = output.argmax(dim=1)
            # Accumulate matches on shares; decrypted once after the loop.
            correct += pred.eq(target.view_as(pred)).sum()
        correct = correct.get().float_precision()
        print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
            correct.item(), len(private_test_loader)* args.test_batch_size,
            100. * correct.item() / (len(private_test_loader) * args.test_batch_size)))
# + id="o2vOPTaLJSdh" colab_type="code" colab={}
model = Net()
# Secret-share model parameters across workers (fixed precision for encrypted ops).
model = model.fix_precision().share(*workers, crypto_provider=crypto_provider, requires_grad=True)
optimizer = optim.SGD(model.parameters(), lr=args.lr)
optimizer = optimizer.fix_precision()
# NOTE(review): private_train_loader / private_test_loader are never defined in
# this notebook (hence its IMPORT_ERROR name) -- the data-loading cell is
# missing; this loop will raise NameError as-is.
for epoch in range(1, args.epochs + 1):
    train(args, model, private_train_loader, optimizer, epoch)
    test(args, model, private_test_loader)
# Inspect an encrypted parameter tensor.
model.fc3.bias
# Peek at the first encrypted input sample.
first_batch, input_data = 0, 0
private_train_loader[first_batch][input_data]
# + id="L4xQN3AVJrRt" colab_type="code" colab={}
class MulBackward(GradFunc):
    """Backward (gradient) function for elementwise multiplication."""
    def __init__(self, self_, other):
        # NOTE(review): passing `self` explicitly mirrors the PySyft GradFunc
        # pattern (GradFunc.__init__ receives the grad-fn plus its inputs);
        # confirm against the installed syft version's signature.
        super().__init__(self, self_, other)
        self.self_ = self_
        self.other = other
    def gradient(self, grad):
        # d(a*b)/da = b ; d(a*b)/db = a -- the second is only produced when
        # both operands are the same (tensor) type, else None (constant factor).
        grad_self_ = grad * self.other
        grad_other = grad * self.self_ if type(self.self_) == type(self.other) else None
        return (grad_self_, grad_other)
# + id="fG9oWw4mJtQt" colab_type="code" colab={}
| IMPORT_ERROR_ENCRYPTED_TRAINING_WITH_PYTORCH_&_PYSYFT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [<NAME>](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
#
# Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
# %load_ext watermark
# %watermark -a '<NAME>' -v -p torch
# # Model Zoo -- Using PyTorch Dataset Loading Utilities for Custom Datasets (CSV files converted to HDF5)
# This notebook provides an example for how to load a dataset from an HDF5 file created from a CSV file, using PyTorch's data loading utilities. For a more in-depth discussion, please see the official
#
# - [Data Loading and Processing Tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html)
# - [torch.utils.data](http://pytorch.org/docs/master/data.html) API documentation
#
# A Hierarchical Data Format (HDF) is a convenient way that allows quick access to data instances during minibatch learning if a dataset is too large to fit into memory. The approach outlined in this notebook uses the common [HDF5](https://support.hdfgroup.org/HDF5/) format and should be accessible to any programming language or tool with an HDF5 API.
#
# **In this example, we are going to use the Iris dataset for illustrative purposes. Let's pretend it's our large training dataset that doesn't fit into memory**.
#
#
# ## Imports
import pandas as pd
import numpy as np
import h5py
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# ## Converting a CSV file to HDF5
# In this first step, we are going to process a CSV file (here, Iris) into an HDF5 database:
# +
# suppose this is a large CSV that does not
# fit into memory:
csv_path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'

# Get number of lines in the CSV file if it's on your hard drive:
#num_lines = subprocess.check_output(['wc', '-l', in_csv])
#num_lines = int(nlines.split()[0])
num_lines = 150
num_features = 4

# Map string class labels to integer codes for compact HDF5 storage.
class_dict = {'Iris-setosa': 0,
              'Iris-versicolor': 1,
              'Iris-virginica': 2}

# use 10,000 or 100,000 or so for large files
chunksize = 10

# this is your HDF5 database:
with h5py.File('iris.h5', 'w') as h5f:

    # use num_features-1 if the csv file has a column header
    dset1 = h5f.create_dataset('features',
                               shape=(num_lines, num_features),
                               compression=None,
                               dtype='float32')
    dset2 = h5f.create_dataset('labels',
                               shape=(num_lines,),
                               compression=None,
                               dtype='int32')

    # change range argument from 0 -> 1 if your csv file contains a column header
    for i in range(0, num_lines, chunksize):

        df = pd.read_csv(csv_path,
                         header=None,  # no header, define column header manually later
                         nrows=chunksize,  # number of rows to read at each iteration
                         skiprows=i)  # skip rows that were already read

        df[4] = df[4].map(class_dict)
        features = df.values[:, :4]
        labels = df.values[:, -1]

        # use i-1 and i-1+chunksize if the csv file has a column header
        # (slice bounds now use `chunksize` instead of the hard-coded 10)
        dset1[i:i+chunksize, :] = features
        # BUG FIX: the original wrote `labels[0]`, broadcasting the chunk's
        # FIRST label over the whole slice -- write the full label vector.
        dset2[i:i+chunksize] = labels
# -
# After creating the database, let's double-check that everything works correctly:
# Re-open read-only and sanity-check the dataset shapes.
with h5py.File('iris.h5', 'r') as h5f:
    print(h5f['features'].shape)
    print(h5f['labels'].shape)
# Spot-check a single entry to confirm features and label line up.
with h5py.File('iris.h5', 'r') as h5f:
    print('Features of entry no. 99:', h5f['features'][99])
    print('Class label of entry no. 99:', h5f['labels'][99])
# ## Implementing a Custom Dataset Class
# Now, we implement a custom `Dataset` for reading the training examples. The `__getitem__` method will
#
# 1. read a single training example from HDF5 based on an `index` (more on batching later)
# 2. return a single training example and its corresponding label
#
# Note that we will keep an open connection to the database for efficiency via `self.h5f = h5py.File(h5_path, 'r')` -- you may want to close it when you are done (more on this later).
class Hdf5Dataset(Dataset):
    """Custom Dataset for loading entries from HDF5 databases."""

    def __init__(self, h5_path, transform=None):
        # Keep a persistent read-only handle open for fast random access
        # during minibatch learning (close it via self.h5f.close() when done).
        self.h5f = h5py.File(h5_path, 'r')
        self.num_entries = self.h5f['labels'].shape[0]
        self.transform = transform

    def __getitem__(self, index):
        x = self.h5f['features'][index]
        y = self.h5f['labels'][index]
        if self.transform is not None:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return self.num_entries
# Now that we have created our custom Dataset class, we can initialize a Dataset instance for the training examples using the 'iris.h5' database file. Then, we initialize a `DataLoader` that allows us to read from the dataset.
# +
# Build the dataset and a shuffling multi-worker loader over it.
train_dataset = Hdf5Dataset(h5_path='iris.h5',
                            transform=None)

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=50,
                          shuffle=True,
                          num_workers=4)
# -
# That's it! Now we can iterate over an epoch using the train_loader as an iterator and use the features and labels from the training dataset for model training as shown in the next section
# ## Iterating Through the Custom Dataset
# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)

num_epochs = 5
for epoch in range(num_epochs):

    for batch_idx, (x, y) in enumerate(train_loader):

        print('Epoch:', epoch+1, end='')
        print(' | Batch index:', batch_idx, end='')
        print(' | Batch size:', y.size()[0])

        # Move the batch to the training device.
        x = x.to(device)
        y = y.to(device)

        # do model training on x and y here
# -
# **Remember that we kept an open connection to the HDF5 database in the `Hdf5Dataset` (via `self.h5f = h5py.File(h5_path, 'r')`). Once we are done, we may want to close this connection:**
train_dataset.h5f.close()
# %watermark -iv
| code/model_zoo/pytorch_ipynb/custom-data-loader-csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="1ndZ6XwI7MYA"
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# + [markdown] id="6XzxTZfKwFNo"
# # Feature Extraction
#
# In this tutorial, we look at a simple example of how to use VISSL to extract features for [ResNet-50 Torchvision pre-trained model](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L16).
#
# You can make a copy of this tutorial by `File -> Open in playground mode` and make changes there. DO NOT request access to this tutorial.
#
# **NOTE:** Please ensure your Collab Notebook has GPU available. To ensure/select this, simple follow: `Edit -> Notebook Settings -> select GPU`.
# + [markdown] id="VohdWhBSw69e"
# ## Install VISSL
#
# Installing VISSL is pretty straightforward. We will use pip binaries of VISSL and follow instructions from [here](https://github.com/facebookresearch/vissl/blob/master/INSTALL.md#install-vissl-pip-package).
# + id="R5ISg59KTOqU" colab={"base_uri": "https://localhost:8080/"} outputId="52e6fb2e-6bd2-4564-b01d-f946e8449f37"
# Install: PyTorch (we assume 1.5.1 but VISSL works with all PyTorch versions >=1.4)
# !pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
# install opencv
# !pip install opencv-python
# install apex by checking system settings: cuda version, pytorch version, python version
import sys
import torch
# Build an apex wheel tag like "py37_cu101_pyt151" from the runtime versions.
# NOTE(review): torch.version.cuda is None on CPU-only builds, which would make
# .replace raise AttributeError -- a CUDA runtime is assumed here.
version_str="".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".",""),
    f"_pyt{torch.__version__[0:5:2]}"
])
print(version_str)
# install apex (pre-compiled with optimizer C++ extensions and CUDA kernels)
# !pip install -f https://dl.fbaipublicfiles.com/vissl/packaging/apexwheels/{version_str}/download.html apex
# install VISSL
# !pip install vissl
# + [markdown] id="u6Fxe3MWxqsI"
# VISSL should be successfully installed by now and all the dependencies should be available.
# + id="Np6atgoOTPrA"
import vissl
import tensorboard
import apex
import torch
# + [markdown] id="AFEHZ4KdxzWq"
# ## YAML config file for Feature Extraction
#
# VISSL provides yaml configuration files for extracting features [here](https://github.com/facebookresearch/vissl/tree/master/configs/config/feature_extraction).
#
# For the purpose of this tutorial, we will use the config file for extracting features from several layers in the trunk of ResNet-50 supervised model on 1-gpu. Let's go ahead and download the [example config file](https://github.com/facebookresearch/vissl/blob/master/configs/config/feature_extraction/extract_resnet_in1k_8gpu.yaml) and [feature settings for trunk layers](https://github.com/facebookresearch/vissl/blob/master/configs/config/feature_extraction/trunk_only/rn50_layers.yaml).
#
# + id="7ufyNAeUaDSs"
# !mkdir -p configs/config/trunk_only
# !wget -q -O configs/__init__.py https://dl.fbaipublicfiles.com/vissl/tutorials/configs/__init__.py
# !wget -q -O configs/config/extract_resnet_in1k_8gpu.yaml https://dl.fbaipublicfiles.com/vissl/tutorials/configs/extract_resnet_in1k_8gpu.yaml
# !wget -q -O configs/config/trunk_only/rn50_layers.yaml https://dl.fbaipublicfiles.com/vissl/tutorials/configs/trunk_only/rn50_layers.yaml
# + [markdown] id="IxMXLYLpsJXj"
# ## Download the ResNet-50 weights from Torchvision
#
# We download the weights from the [torchvision ResNet50 model](https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L16):
# + id="mv0quZwFsWxs" colab={"base_uri": "https://localhost:8080/"} outputId="b305ee47-9e98-4acb-a0c1-8dace35152da"
# !wget https://download.pytorch.org/models/resnet50-19c8e357.pth
# + [markdown] id="ndNMJGSRyffl"
# ## Builtin feature extraction tool in VISSL
#
# VISSL also provides a [helper python tool](https://github.com/facebookresearch/vissl/blob/master/tools/run_distributed_engines.py) that allows to use VISSL for training purposes. This tool offers:
# - allows training and feature extraction both using VISSL.
# - also allows training on 1-gpu or multi-gpu.
# - can be used to launch multi-machine distributed training.
#
# Let's go ahead and download this tool directly.
# + id="j6io7qQWzCbw" colab={"base_uri": "https://localhost:8080/"} outputId="333c4319-442d-4fbd-9cc8-559c627644cf"
# !wget https://dl.fbaipublicfiles.com/vissl/tutorials/run_distributed_engines.py
# + [markdown] id="J0hng2EPY7pr"
# ## Creating a custom data
#
# For the purpose of this tutorial, since we don't have ImageNet on the disk, we will create a dummy dataset by copying an image from COCO dataset in ImageNet dataset folder style as below:
# + id="5-sy6nD-RfwB"
# !mkdir -p dummy_data/train/class1
# !mkdir -p dummy_data/train/class2
# !mkdir -p dummy_data/val/class1
# !mkdir -p dummy_data/val/class2
# create 2 classes in train and add 5 images per class
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class1/img1.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class1/img2.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class1/img3.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class1/img4.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class1/img5.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class2/img1.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class2/img2.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class2/img3.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class2/img4.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/train/class2/img5.jpg
# create 2 classes in val and add 5 images per class
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class1/img1.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class1/img2.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class1/img3.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class1/img4.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class1/img5.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class2/img1.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class2/img2.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class2/img3.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class2/img4.jpg
# !wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O dummy_data/val/class2/img5.jpg
# + [markdown] id="KPGCiTsXZeW3"
# ## Using the custom data in VISSL
#
# Next step for us is to register the dummy data we created above with VISSL. Registering the dataset involves telling VISSL about the dataset name and the paths for the dataset. For this, we create a simple json file with the metadata and save it to `configs/config/dataset_catalog.py` file.
#
# **NOTE**: VISSL uses the specific `dataset_catalog.json` under the path `configs/config/dataset_catalog.json`.
# + id="M8Q6LCqaWjl1" colab={"base_uri": "https://localhost:8080/"} outputId="c65a7997-8035-4b92-b4e8-c4c3cb3c0c0c"
# Catalog entry: per split, a [data_source_path, label_source_path] pair
# (both point at the same disk_folder for this dummy dataset).
json_data = {
    "dummy_data_folder": {
        "train": [
            "/content/dummy_data/train", "/content/dummy_data/train"
        ],
        "val": [
            "/content/dummy_data/val", "/content/dummy_data/val"
        ]
    }
}
# use VISSL's api to save or you can use your custom code.
from vissl.utils.io import save_file
save_file(json_data, "/content/configs/config/dataset_catalog.json")
# + [markdown] id="otN1pB32cBHK"
# Next, we verify that the dataset is registered with VISSL. For that we query VISSL's dataset catalog as below:
# + colab={"base_uri": "https://localhost:8080/"} id="wZBhH-s5bcHd" outputId="e6081633-3cc0-414a-e558-0bcaf9d9e364"
# Verify the dataset registered above is visible to VISSL's catalog.
from vissl.data.dataset_catalog import VisslDatasetCatalog
# list all the datasets that exist in catalog
print(VisslDatasetCatalog.list())
# get the metadata of dummy_data_folder dataset
print(VisslDatasetCatalog.get("dummy_data_folder"))
# + [markdown] id="YaUMDwMdzYHN"
# ## Extract the features
#
# We are ready to extract features now. For the purpose of this tutorial, we will use synthetic dataset and train on dummy images. VISSL supports training on wide range of datasets and allows adding custom datasets. Please see VISSL documentation on how to use the datasets. To train on ImageNet instead: assuming your ImageNet dataset folder path is `/path/to/my/imagenet/folder/`, you can add the following command line
# input to your training command:
# ```
# config.DATA.TRAIN.DATASET_NAMES=[imagenet1k_folder] \
# config.DATA.TRAIN.DATA_SOURCES=[disk_folder] \
# config.DATA.TRAIN.DATA_PATHS=["/path/to/my/imagenet/folder/train"] \
# config.DATA.TRAIN.LABEL_SOURCES=[disk_folder]
# ```
# + [markdown] id="fM7IigSpONW0"
# The feature extraction command looks like:
# + colab={"base_uri": "https://localhost:8080/"} id="6v0HvauIj9S2" outputId="c67440f6-893d-41bc-c537-12a26a4b9439"
# !python3 run_distributed_engines.py \
# hydra.verbose=true \
# config=extract_resnet_in1k_8gpu \
# +config/trunk_only=rn50_layers \
# config.DATA.TRAIN.DATA_SOURCES=[disk_folder] \
# config.DATA.TRAIN.LABEL_SOURCES=[disk_folder] \
# config.DATA.TRAIN.DATASET_NAMES=[dummy_data_folder] \
# config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=2 \
# config.DATA.TEST.DATA_SOURCES=[disk_folder] \
# config.DATA.TEST.LABEL_SOURCES=[disk_folder] \
# config.DATA.TEST.DATASET_NAMES=[dummy_data_folder] \
# config.DATA.TEST.BATCHSIZE_PER_REPLICA=2 \
# config.DISTRIBUTED.NUM_NODES=1 \
# config.DISTRIBUTED.NUM_PROC_PER_NODE=1 \
# config.CHECKPOINT.DIR="./checkpoints" \
# config.MODEL.WEIGHTS_INIT.PARAMS_FILE="/content/resnet50-19c8e357.pth" \
# config.MODEL.WEIGHTS_INIT.APPEND_PREFIX="trunk.base_model._feature_blocks." \
# config.MODEL.WEIGHTS_INIT.STATE_DICT_KEY_NAME=""
# + [markdown] id="A8fILq7VzyOu"
# And we are done!! We have the features for layers `conv1, res2, res3, res4, res5, res5avg` in `checkpoints/*.npy`.
# + colab={"base_uri": "https://localhost:8080/"} id="otUmgl4ms96M" outputId="79e7faa8-ca0a-43f5-b943-574478ae83f9"
# ls checkpoints/
# + [markdown] id="9xFUcTj00B_a"
# # Loading Pre-trained models in VISSL
#
# VISSL supports Torchvision models out of the box. Generally, for loading any non-VISSL model, one needs to correctly set the following configuration options:
#
# ```yaml
# WEIGHTS_INIT:
# # path to the .torch weights files
# PARAMS_FILE: ""
# # name of the state dict. checkpoint = {"classy_state_dict": {layername:value}}. Options:
# # 1. classy_state_dict - if model is trained and checkpointed with VISSL.
# # checkpoint = {"classy_state_dict": {layername:value}}
# # 2. "" - if the model_file is not a nested dictionary for model weights i.e.
# # checkpoint = {layername:value}
# # 3. key name that your model checkpoint uses for state_dict key name.
# # checkpoint = {"your_key_name": {layername:value}}
# STATE_DICT_KEY_NAME: "classy_state_dict"
# # specify what layer should not be loaded. Layer names with this key are not copied
# # By default, set to BatchNorm stats "num_batches_tracked" to be skipped.
# SKIP_LAYERS: ["num_batches_tracked"]
# ####### If loading a non-VISSL trained model, set the following two args carefully #########
# # to make the checkpoint compatible with VISSL, if you need to remove some names
# # from the checkpoint keys, specify the name
# REMOVE_PREFIX: ""
# # In order to load the model (if not trained with VISSL) with VISSL, there are 2 scenarios:
# # 1. If you are interested in evaluating the model features and freeze the trunk.
# # Set APPEND_PREFIX="trunk.base_model." This assumes that your model is compatible
# # with the VISSL trunks. The VISSL trunks start with "_feature_blocks." prefix. If
# # your model doesn't have these prefix you can append them. For example:
# # For TorchVision ResNet trunk, set APPEND_PREFIX="trunk.base_model._feature_blocks."
# # 2. where you want to load the model simply and finetune the full model.
# # Set APPEND_PREFIX="trunk."
# # This assumes that your model is compatible with the VISSL trunks. The VISSL
# # trunks start with "_feature_blocks." prefix. If your model doesn't have these
# # prefix you can append them.
# # For TorchVision ResNet trunk, set APPEND_PREFIX="trunk._feature_blocks."
# # NOTE: the prefix is appended to all the layers in the model
# APPEND_PREFIX: ""
# ```
# + id="oc9YxGbNtFg6"
| tutorials/Feature_Extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="6i_eC-b1E83a" outputId="2e5f8320-9206-4de5-fc2a-315ddcbab895"
# !nvidia-smi
# + id="-y74ze6xFBnj"
from google.colab import drive
# Mount the Google Drive
drive.mount('/content/drive', force_remount=True)
import os
# Now, cd to a folder from my Google Drive so the relative config paths
# below (ml3d/configs/...) resolve against the Open3D-ML checkout.
os.chdir("/content/drive/My Drive/Chris/3D-ML/Open3D-ML_master")
# + id="wvWJ5ujAJF0G"
# !pip3 install open3d # 0.13.0
# !pip3 install tensorflow~=2.4.1 # -r requirements-tensorflow.txt
# !pip3 install laspy[lazrs] # 2.0.2
# + [markdown] id="BF43BjesDKM4"
# ## Amsterdam dataloader
# We will make this in a Python script.
#
# NOTE: Comment out line 60 (the `raise KeyError`) in registry.py under dist-packages/tensorflow — you can open and edit the file directly in Colab. Restart the runtime afterwards.
# + id="ITi7ewtMDJdx"
import os
import ml3d as _ml3d
import ml3d.tf as ml3d
# Build a RandLA-Net semantic-segmentation training pipeline on the
# Amsterdam3D point-cloud dataset, configured from a YAML file.
cfg_file = "ml3d/configs/randlanet_amsterdam3d.yml"
cfg = _ml3d.utils.Config.load_from_file(cfg_file)
model = ml3d.models.RandLANet(**cfg.model)
# Override the dataset location from the config with the Drive copy.
cfg.dataset['dataset_path'] = "/content/drive/My Drive/Datasets/street_lights/LiDAR/Weesp_auto_labeling/Cyclomedia_pc_verified"
# pop() removes 'dataset_path' so it is not passed a second time via **cfg.dataset.
dataset = _ml3d.datasets.Amsterdam3D(cfg.dataset.pop('dataset_path', None), **cfg.dataset)
pipeline = ml3d.pipelines.SemanticSegmentation(model=model, dataset=dataset, max_epoch=200, batch_size=1, device='gpu')
# Folder where training checkpoints/logs will be written.
ckpt_folder = "./logs/"
os.makedirs(ckpt_folder, exist_ok=True)
# + id="GgZUpdjrDoAu"
# Metadata displayed in the TensorBoard summary for this run.
pipeline.cfg_tb = {
    "readme": "readme",
    "cmd_line": "cmd_line",
    "dataset": "Amsterdam3D",
    "model": "RandLaNet",
    "pipeline": "Default Pipeline",
}
# + id="LDtkq4RQDrK0"
# Launch supervised training (long-running; checkpoints go to ./logs/).
pipeline.run_train()
# + [markdown] id="oOnsIl5OZnNY"
# ## Evaluate
# + id="W1JorX67rjhx"
# !pip3 install addict
# + id="qqXkfhpHZmYo"
import os
import ml3d as _ml3d
import ml3d.tf as ml3d
# Rebuild model + dataset exactly as for training, restore a checkpoint,
# then run single-example inference and a full test-set evaluation.
cfg_file = "ml3d/configs/randlanet_amsterdam3d.yml"
cfg = _ml3d.utils.Config.load_from_file(cfg_file)
model = ml3d.models.RandLANet(**cfg.model)
cfg.dataset['dataset_path'] = "/content/drive/My Drive/Datasets/street_lights/LiDAR/Weesp_auto_labeling/Cyclomedia_pc_verified"
dataset = _ml3d.datasets.Amsterdam3D(cfg.dataset.pop('dataset_path', None), **cfg.dataset)
pipeline = ml3d.pipelines.SemanticSegmentation(model, dataset=dataset, device="gpu", **cfg.pipeline)
# pretrained model
ckpt_path = "/content/drive/My Drive/Chris/Open3D-ML/Open3D-ML_master/logs/RandLANet_Amsterdam3D_tf/checkpoint/ckpt-11"
# load the parameters.
pipeline.load_ckpt(ckpt_path=ckpt_path)
test_split = dataset.get_split("val")
data = test_split.get_data(0)
# run inference on a single example.
# returns dict with 'predict_labels' and 'predict_scores'.
result = pipeline.run_inference(data)
# evaluate performance on the test set; this will write logs to './logs'.
pipeline.run_test()
# + [markdown] id="N287sOqb_QZC"
# ## Visualize
# + id="DY2y-K_a_SKu"
# based on https://github.com/intel-isl/Open3D-ML/blob/master/examples/vis_pred.py
import numpy as np
import os
import ml3d as _ml3d
import ml3d.tf as ml3d
import sys
from os.path import exists, join, isfile, dirname, abspath, split
import laspy
def get_custom_data(pc_names, path):
    """Load .laz point clouds from <path>/points/<name>.

    Returns a list of dicts with keys 'point' (Nx3 float32 xyz),
    'feat' (Nx3 float32 rgb) and 'label' (N int32, all zeros).
    """
    clouds = []
    for name in pc_names:
        las = laspy.read(join(path, 'points', name))
        xyz = np.vstack((las.x, las.y, las.z)).T.astype(np.float32)
        rgb = np.vstack((las.red, las.green, las.blue)).T.astype(np.float32)
        clouds.append({
            'point': xyz,
            'feat': rgb,
            'label': np.zeros((xyz.shape[0],), dtype=np.int32),
        })
    return clouds
def pred_custom_data(pc_names, pcs, pipeline_r):
    """Run the segmentation pipeline on each point cloud.

    Returns a list of visualizer dicts with 'name', 'points' and 'labels'.
    """
    vis_points = []
    for name, cloud in zip(pc_names, pcs):
        result = pipeline_r.run_inference(cloud)
        labels = (result['predict_labels'] + 1).astype(np.int32)
        # WARNING, THIS IS A HACK
        # Fill "unlabeled" value because predictions have no 0 values.
        labels[0] = 0
        vis_points.append({
            "name": name + "_randlanet",
            "points": cloud['point'],
            "labels": labels,
        })
    return vis_points
# Point clouds to visualize; each must exist under <data_path>/points/.
pc_names = ["processed_2633_9595.laz"]
cfg_file = "openje/ml3d/configs/randlanet_amsterdam3d.yml"
cfg = _ml3d.utils.Config.load_from_file(cfg_file)
# Build a label lookup table mapping class ids to label names/colors.
amsterdam_labels = _ml3d.datasets.Amsterdam3D.get_label_to_names()
v = _ml3d.vis.Visualizer()
lut = _ml3d.vis.LabelLUT()
for val in sorted(amsterdam_labels.keys()):
    lut.add_label(amsterdam_labels[val], val)
data_path = "demo_data_amsterdam"
cfg.dataset["dataset_path"] = data_path
v.set_lut("labels", lut)
v.set_lut("pred", lut)
# Checkpoint restored before running inference.
ckpt_path = "checkpoint/ckpt-1"
model = ml3d.models.RandLANet(**cfg.model)
pipeline_r = ml3d.pipelines.SemanticSegmentation(model, **cfg.pipeline)
pipeline_r.load_ckpt(ckpt_path)
pcs = get_custom_data(pc_names, data_path)
pcs_with_pred = pred_custom_data(pc_names, pcs, pipeline_r)
# Open the interactive Open3D visualizer with the predicted labels.
v.visualize(pcs_with_pred)
| RandLANet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Questions 1
# List and its default functions.
# + active=""
# clear() Removes all the elements from the list
# copy() Returns a copy of the list
# count() Returns the number of elements with the specified value
# extend() Add the elements of a list (or any iterable), to the end of the current list
# index() Returns the index of the first element with the specified value
# insert() Adds an element at the specified position
# pop() Removes the element at the specified position
# remove() Removes the first item with the specified value
# reverse() Reverses the order of the list
# sort() Sorts the list
# -
# ## Append
# list.append: add one element to the end of the list.
lst = ["Himadri", 10, 7186, 126.43, [1, 2, 3]]
lst.append("Rupam")
lst
# # Clear
# list.clear: empty the list in place.
lst = ['Rupam', 10, 7186, 126.43, [1, 2, 3], 'Himadri']
lst.clear()
lst
# # Copy
# list.copy: shallow copy of the list.
lst = ['Rupam', 10, 7186, 126.43, [1, 2, 3], 'Himadri']
row = lst.copy()
row
# # Count
# list.count: occurrences of a value (0 when it is absent).
lst = ['Rupam', 10, 7186, 126.43, [1, 2, 3], 'Himadri']
x = lst.count("Arghya")
print(x)
# ## Extend
# list.extend: append every element of another iterable.
fruits = ['apple', 'banana', 'cherry']
cars = ['Ford', 'BMW', 'Volvo']
fruits.extend(cars)
print(fruits)
# # Index
# list.index: position of the first matching element.
fruits = ['apple', 'banana', 'cherry']
x = fruits.index("cherry")
print(x)
# # Questions 2
# Dictionary and its default functions.
# + active=""
# clear() Removes all the elements from the dictionary
# copy() Returns a copy of the dictionary
# fromkeys() Returns a dictionary with the specified keys and value
# get() Returns the value of the specified key
# items() Returns a list containing a tuple for each key value pair
# keys() Returns a list containing the dictionary's keys
# pop() Removes the element with the specified key
# popitem() Removes the last inserted key-value pair
# setdefault() Returns the value of the specified key. If the key does not exist: insert the key, with the specified value
# -
# # Fromkeys
# dict.fromkeys: build a dict with the same value for every key.
x = ('key1', 'key2', 'key3')
y = 2
z = dict.fromkeys(x, y)
print(z)
# # Get
# dict.get: lookup that returns None (or a default) instead of raising.
car = {"brand": "Ford", "model": "Mustang", "year": 1964}
x = car.get("model")
print(x)
# # Items
# dict.items: view of (key, value) pairs.
car = {"brand": "Ford", "model": "Mustang", "year": 1964}
x = car.items()
print(x)
# # Pop
# dict.pop: remove a key and return its value.
car = {"brand": "Ford", "model": "Mustang", "year": 1964}
car.pop("model")
car
# # Set Default
# dict.setdefault: return the existing value; insert the default only if the key is missing.
car = {"brand": "Ford", "model": "Mustang", "year": 1964}
x = car.setdefault("model", "Bronco")
print(x)
car
# # Questions 3
# Sets and its default functions.
# + active=""
# Method Description
# add() Adds an element to the set
# clear() Removes all the elements from the set
# copy() Returns a copy of the set
# difference() Returns a set containing the difference between two or more sets
# difference_update() Removes the items in this set that are also included in another, specified set
# discard() Remove the specified item
# intersection() Returns a set, that is the intersection of two other sets
# intersection_update() Removes the items in this set that are not present in other, specified set(s)
# isdisjoint()	Returns True if the two sets have no elements in common (i.e. are disjoint)
# issubset() Returns whether another set contains this set or not
# issuperset() Returns whether this set contains another set or not
# pop() Removes an element from the set
# remove() Removes the specified element
# symmetric_difference() Returns a set with the symmetric differences of two sets
# symmetric_difference_update() inserts the symmetric differences from this set and another
# union() Return a set containing the union of sets
# update() Update the set with the union of this set and others
# -
# # Difference
# set.difference: elements of x that are not in y.
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "apple"}
z = x.difference(y)
print(z)
# # Discard
# set.discard: remove an element; no error if it is missing.
fruits = {"apple", "mango", "cherry"}
fruits.discard("mango")
fruits
# # Difference Update
# set.difference_update: like difference, but modifies x in place.
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "apple"}
x.difference_update(y)
print(x)
# # Intersection
# set.intersection: elements common to both sets.
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "apple"}
z = x.intersection(y)
print(z)
# # Isdisjoint
# set.isdisjoint: True when the two sets share no elements.
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "facebook"}
z = x.isdisjoint(y)
print(z)
# # Questions 4
# Tuple and explore default methods.
# + active=""
# count() Returns the number of times a specified value occurs in a tuple
# index() Searches the tuple for a specified value and returns the position of where it was found
# -
# # Count
# tuple.count: number of occurrences of a value.
thistuple = (1, 3, 7, 8, 5, 7, 4, 6, 8, 5)
x = thistuple.count(5)
print(x)
# Fixed: the original counted on `thistuple` here, leaving thistuple1 unused.
thistuple1 = (1, 3, 7, 8, 5, 4, 6, 8, 5, 7, 6)
x = thistuple1.count(7)
print(x)
# Fixed: same bug — count on the tuple defined just above, not the first one.
thistuple2 = (1, 3, 7, 8, 5, 4, 6, 8, 5, 7, 6)
x = thistuple2.count(8)
print(x)
# # Index
# tuple.index: position of the first occurrence of a value.
thistuple = (1, 2, 3, 8, 7, 5, 3, 6, 8, 5)
x = thistuple.index(8)
print(x)
thistuple = (1, 2, 3, 8, 7, 5, 3, 6, 8, 5)
x = thistuple.index(3)
print(x)
# # Questions 5
# Strings and explore default methods.
# + active=""
# capitalize() Converts the first character to upper case
# casefold() Converts string into lower case
# center() Returns a centered string
# count() Returns the number of times a specified value occurs in a string
# encode() Returns an encoded version of the string
# endswith() Returns true if the string ends with the specified value
# expandtabs() Sets the tab size of the string
# find() Searches the string for a specified value and returns the position of where it was found
# format() Formats specified values in a string
# format_map() Formats specified values in a string
# index() Searches the string for a specified value and returns the position of where it was found
# isalnum() Returns True if all characters in the string are alphanumeric
# isalpha() Returns True if all characters in the string are in the alphabet
# isdecimal() Returns True if all characters in the string are decimals
# isdigit() Returns True if all characters in the string are digits
# isidentifier() Returns True if the string is an identifier
# islower() Returns True if all characters in the string are lower case
# isnumeric() Returns True if all characters in the string are numeric
# isprintable() Returns True if all characters in the string are printable
# isspace() Returns True if all characters in the string are whitespaces
# istitle() Returns True if the string follows the rules of a title
# isupper() Returns True if all characters in the string are upper case
# join() Joins the elements of an iterable to the end of the string
# ljust() Returns a left justified version of the string
# lower() Converts a string into lower case
# lstrip() Returns a left trim version of the string
# maketrans() Returns a translation table to be used in translations
# partition() Returns a tuple where the string is parted into three parts
# replace() Returns a string where a specified value is replaced with a specified value
# rfind() Searches the string for a specified value and returns the last position of where it was found
# rindex() Searches the string for a specified value and returns the last position of where it was found
# rjust() Returns a right justified version of the string
# rpartition() Returns a tuple where the string is parted into three parts
# rsplit() Splits the string at the specified separator, and returns a list
# rstrip() Returns a right trim version of the string
# split() Splits the string at the specified separator, and returns a list
# splitlines() Splits the string at line breaks and returns a list
# startswith() Returns true if the string starts with the specified value
# strip() Returns a trimmed version of the string
# swapcase() Swaps cases, lower case becomes upper case and vice versa
# title() Converts the first character of each word to upper case
# translate() Returns a translated string
# upper() Converts a string into upper case
# zfill() Fills the string with a specified number of 0 values at the beginning
# -
# # Capitalize
# str.capitalize: first character upper-cased, the rest lower-cased.
txt = "python essential on letsupgrade.in"
x = txt.capitalize()
print(x)
# # Casefold
# str.casefold: aggressive lower-casing, suited for caseless comparisons.
txt = "Python Essential On LetsUpgrade.In"
x = txt.casefold()
print(x)
# # Center
# str.center: pad on both sides to the given total width.
txt = "Python Essential On LetsUpgrade.In"
x = txt.center(100)
print(x)
# # Encode
# str.encode: UTF-8 bytes by default (non-ASCII chars become multi-byte).
txt = "Python Essentiål On LetsUpgråde.In"
x = txt.encode()
print(x)
# # Endswith
# str.endswith: True only when the string ends with the given suffix.
txt = "Python Essential On LetsUpgrade.In"
x = txt.endswith(".")
print(x)
txt = "Python Essential On LetsUpgrade.In."
x = txt.endswith(".")
print(x)
# # Expandtabs
# str.expandtabs: replace each tab using the given tab size.
txt = "H\te\tl\tl\to"
I = txt.expandtabs(2)
print(I)
| Himadri Python Essential Assignment day 2 batch 7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# Two parallel label arrays defining the levels of a hierarchical index.
arrays = [["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], ["one", "two", "one", "two", "one", "two", "one", "two"]]
# -
# Pair up the two levels: [("bar", "one"), ("bar", "two"), ...]
tuples = list(zip(*arrays))
tuples
index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
index
# A Series carried by the hierarchical index.
s = pd.Series(np.random.randn(8), index=index)
s
# Fixed: removed the IPython continuation prompts ("....:") that were
# pasted in with this example and made the cell a SyntaxError.
df = pd.DataFrame(
    [["bar", "one"], ["bar", "two"], ["foo", "one"], ["foo", "two"]],
    columns=["first", "second"],
)
df
pd.MultiIndex.from_frame(df)
# Passing the raw arrays as `index` also builds a MultiIndex (unnamed levels).
df = pd.DataFrame(np.random.randn(8, 4), index=arrays)
df
df.index.names
df.index
index = df.index
# All first-level labels, one per row.
index.get_level_values(0)
# Tuple indexing: select the row labelled ("bar", "two").
df.loc[("bar", "two")]
| datasets/Sternberg/scripts/Hierarchical indexing (MultiIndex).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(ggplot2)
library(stringr)
library(magrittr)
library(WriteXLS)
library(tidyr)
library(dplyr)
library(plotly)
library(cluster)
library(cowplot)
library(gridExtra)
library(viridis)
library(GenomicRanges)
library(GenomeInfoDb)
library(data.table)
library(ComplexHeatmap)
library(ArchR)
suppressMessages(library(chromVAR))
suppressMessages(library(gchromVAR))
suppressMessages(library(SummarizedExperiment))
suppressMessages(library(data.table))
suppressMessages(library(BiocParallel))
suppressMessages(library(BSgenome.Hsapiens.UCSC.hg38))
# +
# liftover the snps from hg19 to hg38
# Converts each fine-mapped SNP table under ./CausalSNPs/ into a BED file
# (0-based, half-open coordinates) and lifts it to hg38 with UCSC liftOver.
liftover_tool <- "/home/rs619065/opt/UCSCTools/liftOver"
chain_file <- "/home/rs619065/opt/ChainFiles/hg19ToHg38.over.chain.gz"
lapply(list.files("./CausalSNPs"), function(x){
    # Use the file stem (no directory, no extension) as the phenotype name.
    x <- tools::file_path_sans_ext(basename(x))
    message(sprintf("processing : %s", x))
    # Keep only SNPs with fine-mapping posterior probability > 0.01.
    df <- read.table(glue::glue("./CausalSNPs/{x}.txt"), header = TRUE) %>%
        subset(., select = c("CHR", "BP", "rsID", "FINEMAP")) %>%
        subset(., FINEMAP > 0.01)
    # BED is 0-based half-open: start = BP - 1, end = BP.
    df$BP2 <- df$BP
    df$BP <- df$BP - 1
    # Add the "chr"/"rs" prefixes expected by liftOver and downstream tools.
    df$CHR <- paste0("chr", df$CHR)
    df$rsID <- paste0("rs", df$rsID)
    df <- subset(df, select = c("CHR","BP", "BP2", "rsID", "FINEMAP"))
    df <- df[order(df$CHR, df$BP, df$BP2), ]
    write.table(df, file = glue::glue("./CausalSNPsBedFile/{x}.bed"),
                sep = "\t", row.names = FALSE, col.names = FALSE,
                quote = FALSE)
    ## hg19 to hg38
    command <- glue::glue("{liftover_tool} ./CausalSNPsBedFile/{x}.bed {chain_file} ./CausalSNPs_hg38/{x}.bed ./CausalSNPs_hg38/{x}_umapped.bed")
    system(command)
    # The unmapped-SNP side file is not needed afterwards, so delete it.
    system(glue::glue("rm ./CausalSNPs_hg38/{x}_umapped.bed"))
})
# -
set.seed(42)
# Load the filtered heart-failure ArchR project.
proj <- loadArchRProject(path = "../ArchR/HFFiltering", showLogo = FALSE)
# +
## get all linked peaks
# Peak-to-gene links were computed separately per condition; attach the
# k-means cluster id of each ATAC peak, then pool the three conditions.
p2g_control <- readRDS("../ArchR/Control/p2g.Rds")
p2g_hf_a <- readRDS("../ArchR/HF_A/p2g.Rds")
p2g_hf_ckd <- readRDS("../ArchR/HF_CKD/p2g.Rds")
p2g_control$Peak2GeneLinks$KmeansCluster <- p2g_control$ATAC$kmeansId
p2g_hf_a$Peak2GeneLinks$KmeansCluster <- p2g_hf_a$ATAC$kmeansId
p2g_hf_ckd$Peak2GeneLinks$KmeansCluster <- p2g_hf_ckd$ATAC$kmeansId
df_p2g_control <- as.data.frame(p2g_control$Peak2GeneLinks)
df_p2g_hf_a <- as.data.frame(p2g_hf_a$Peak2GeneLinks)
df_p2g_hf_ckd <- as.data.frame(p2g_hf_ckd$Peak2GeneLinks)
p2g <- rbind(df_p2g_control, df_p2g_hf_a, df_p2g_hf_ckd)
# Unique ATAC peak indices that are linked to at least one gene.
idx_ATAC <- unique(p2g$idxATAC)
# +
# Pseudo-bulk: average accessibility of the linked peaks per
# (condition, cell type) combination.
counts <- readRDS("../ArchR/HFFiltering/PeakMatrix.Rds")
counts <- counts[idx_ATAC, ]
df <- proj@cellColData %>%
    as.data.frame() %>%
    subset(., CellType %in% c("fibroblast", "adipocytes", "cardiomyocyte",
                              "T-cells", "pericyte", "endothelial", "macrophages", "vSMCs"))
df$Condition_CellType <- paste0(df$Condition, "_", df$CellType)
bulkPeakMatrix <- lapply(unique(df$Condition_CellType), function(x){
    cells <- rownames(df)[df$Condition_CellType == x]
    bulk <- rowMeans(counts[, cells]) %>%
        as.data.frame()
    colnames(bulk) <- x
    bulk
}) %>% Reduce(cbind, .)
#saveRDS(bulkPeakMatrix, file = "bulkPeakMatrix.Rds")
# -
head(bulkPeakMatrix)
# +
# gchromVAR for enrichment
# Rebuild peak coordinates from the "chr_start_end" rownames.
df <- stringr::str_split_fixed(rownames(bulkPeakMatrix), "_", 3)
peaks <- GRanges(seqnames = df[, 1], ranges = IRanges(start = as.numeric(df[, 2]),
                                                      end = as.numeric(df[, 3])))
rownames(bulkPeakMatrix) <- NULL
SE <- SummarizedExperiment(assays = list(counts = as.matrix(bulkPeakMatrix)),
                           rowData = peaks,
                           colData = DataFrame(names = colnames(bulkPeakMatrix)))
# GC bias correction is required before computing chromVAR deviations.
SE <- addGCBias(SE,
                genome = BSgenome.Hsapiens.UCSC.hg38)
# +
# enrichment
# Score each pseudo-bulk profile for enrichment of the lifted-over GWAS SNPs;
# column 5 of the BED files holds the FINEMAP posterior weight.
files <- list.files("./CausalSNPs_hg38",
                    full.names = TRUE, pattern = "*.bed$")
ukbb <- importBedScore(rowRanges(SE), files, colidx = 5)
ukbb_wDEV <- computeWeightedDeviations(SE, ukbb)
z <- t(assays(ukbb_wDEV)[["z"]])
## remove NA
z <- z[ , colSums(is.na(z)) == 0]
# Keep only the phenotype name from the "Phenotype_..." column labels.
colnames(z) <- stringr::str_split_fixed(colnames(z), "_", 4)[, 1]
# +
# Phenotype ordering and Disease/Trait annotation for the heatmap columns.
Diseases <- c("AF", "Angina", "CAD", "HTN",
              "Hypothyroidism", "Hypercholesterolemia", "T2D", "Osteoperosis",
              "MS", "RA", "Asthma", "SEL")
Traits <- c("Albumin", "Bilirubin", "CRP", "SerumCreatinine",
            "CreatinineKinase", "SerumUrate", "FastingGlucose", "HgA1c",
            "LDL", "HDL", "Triglycerides", "TotalCholesterol",
            "WC")
df_anno <- data.frame(Phenotype = c(Diseases, Traits),
                      Anno = c(rep("Disease", length(Diseases)),
                               rep("Traits", length(Traits))))
rownames(df_anno) <- df_anno$Phenotype
df_anno$Phenotype <- NULL
# NOTE(review): column_ha is built but never passed to Heatmap() below —
# possibly intended as top_annotation; confirm before relying on it.
column_ha <- HeatmapAnnotation(df = df_anno,
                               col = list(Anno = c("Disease" = "#a6cee3",
                                                   "Traits" = "#1f78b4")))
z <- z[, rownames(df_anno)]
# One-sided p-values from the z-scores, BH-adjusted across the whole matrix
# (p.adjust flattens to a vector, so reshape back and restore dimnames).
pvalue <- pnorm(z, lower.tail = FALSE)
pvalue <- p.adjust(pvalue, method = "BH")
pvalue <- matrix(pvalue, ncol = ncol(z))
rownames(pvalue) <- rownames(z)
colnames(pvalue) <- colnames(z)
cn <- colnames(z)
# Heatmap of enrichment z-scores with significance stars drawn per cell.
p <- Heatmap(z, name = "Enrichment",
             clustering_distance_columns = "pearson",
             clustering_distance_rows = "pearson",
             clustering_method_rows = "ward.D2",
             clustering_method_columns = "ward.D2",
             show_column_names = FALSE,
             rect_gp = gpar(col = "black", lwd = 0.5, lty = "solid"),
             bottom_annotation = HeatmapAnnotation(
                 text = anno_text(cn, rot = 60,
                                  location = unit(1, "npc"), just = "right"),
                 annotation_height = max_text_width(cn)),
             cell_fun = function(j, i, x, y, width, height, fill) {
                 # ***: q < 0.001, **: 0.001 < q < 0.01, *: 0.01 < q < 0.05
                 if (pvalue[i, j] < 0.001){
                     grid.text("***", x, y, gp = gpar(fontsize = 10))
                 }
                 else if(pvalue[i, j] < 0.01 & pvalue[i, j] > 0.001){
                     grid.text("**",
                               x, y, gp = gpar(fontsize = 10))
                 } else if(pvalue[i, j] > 0.01 & pvalue[i, j] < 0.05){
                     grid.text("*",
                               x, y, gp = gpar(fontsize = 10))
                 }
             })
options(repr.plot.height = 10, repr.plot.width = 12)
p
# Also save a PDF copy alongside the inline display.
pdf("./Plots/gwas_enrichment_with_average_signal.pdf", height = 10, width = 12)
draw(p)
dev.off()
# -
saveRDS(z, "./Enrichment/z_score.Rds")
saveRDS(pvalue, "./Enrichment/p_value.Rds")
sessionInfo()
| snATAC/GWAS/01_snp_enrichment.ipynb |
# [](https://colab.research.google.com/github/neurogym/ngym_usage/blob/master/supervised/auto_notebooks/supervised/MotorTiming-v0.ipynb)
# ### Install packages if on Colab
# Uncomment following lines to install
# # ! pip install gym # Install gym
# # ! git clone https://github.com/gyyang/neurogym.git # Install neurogym
# # %cd neurogym/
# # ! pip install -e .
# ### Import packages
# +
import os
from pathlib import Path
import json
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import gym
import neurogym as ngym
# Run on GPU when available, CPU otherwise.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Neurogym task to train on.
envid = 'MotorTiming-v0'
# -
def get_modelpath(envid):
    """Return ./files/<envid>, creating the directory tree if needed."""
    model_dir = Path('.') / 'files' / envid
    # makedirs creates the intermediate 'files' directory as well.
    os.makedirs(model_dir, exist_ok=True)
    return model_dir
# ### Define network
class Net(nn.Module):
    """Single-layer LSTM followed by a linear readout.

    forward(x) returns (logits, lstm_output) for input of shape
    (seq_len, batch, input_size).
    """
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        rnn_out, _ = self.lstm(x)
        return self.linear(rnn_out), rnn_out
# ### Train network
# +
"""Supervised training networks.
Save network in a path determined by environment ID.
Args:
envid: str, environment ID.
"""
modelpath = get_modelpath(envid)
config = {
'dt': 100,
'hidden_size': 64,
'lr': 1e-2,
'batch_size': 16,
'seq_len': 100,
'envid': envid,
}
env_kwargs = {'dt': config['dt']}
config['env_kwargs'] = env_kwargs
# Save config
with open(modelpath / 'config.json', 'w') as f:
json.dump(config, f)
# Make supervised dataset
dataset = ngym.Dataset(
envid, env_kwargs=env_kwargs, batch_size=config['batch_size'],
seq_len=config['seq_len'])
env = dataset.env
act_size = env.action_space.n
# Train network
net = Net(input_size=env.observation_space.shape[0],
hidden_size=config['hidden_size'],
output_size=act_size)
net = net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=config['lr'])
print('Training task ', envid)
running_loss = 0.0
for i in range(2000):
inputs, labels = dataset()
inputs = torch.from_numpy(inputs).type(torch.float).to(device)
labels = torch.from_numpy(labels.flatten()).type(torch.long).to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs, _ = net(inputs)
loss = criterion(outputs.view(-1, act_size), labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 200 == 199:
print('{:d} loss: {:0.5f}'.format(i + 1, running_loss / 200))
running_loss = 0.0
torch.save(net.state_dict(), modelpath / 'net.pth')
print('Finished Training')
# -
def infer_test_timing(env):
    """Return a fixed timing dict for testing.

    Each period is set to the median of 100 sampled durations, so test
    trials all share the same timing.
    """
    return {
        period: np.median([env.sample_time(period) for _ in range(100)])
        for period in env.timing
    }
# ### Run network after training for analysis
# +
"""Run trained networks for analysis.
Args:
envid: str, Environment ID
Returns:
activity: a list of activity matrices, each matrix has shape (
N_time, N_neuron)
info: pandas dataframe, each row is information of a trial
config: dict of network, training configurations
"""
modelpath = get_modelpath(envid)
with open(modelpath / 'config.json') as f:
config = json.load(f)
env_kwargs = config['env_kwargs']
# Run network to get activity and info
# Environment
env = gym.make(envid, **env_kwargs)
env.timing = infer_test_timing(env)
env.reset(no_step=True)
# Instantiate the network and print information
with torch.no_grad():
net = Net(input_size=env.observation_space.shape[0],
hidden_size=config['hidden_size'],
output_size=env.action_space.n)
net = net.to(device)
net.load_state_dict(torch.load(modelpath / 'net.pth'))
perf = 0
num_trial = 100
activity = list()
info = pd.DataFrame()
for i in range(num_trial):
env.new_trial()
ob, gt = env.ob, env.gt
inputs = torch.from_numpy(ob[:, np.newaxis, :]).type(torch.float)
action_pred, hidden = net(inputs)
# Compute performance
action_pred = action_pred.detach().numpy()
choice = np.argmax(action_pred[-1, 0, :])
correct = choice == gt[-1]
# Log trial info
trial_info = env.trial
trial_info.update({'correct': correct, 'choice': choice})
info = info.append(trial_info, ignore_index=True)
# Log stimulus period activity
activity.append(np.array(hidden)[:, 0, :])
print('Average performance', np.mean(info['correct']))
activity = np.array(activity)
# -
# ### General analysis
# +
def analysis_average_activity(activity, info, config):
    """Plot activity averaged over trials and units as a function of time."""
    # Load and preprocess results
    plt.figure(figsize=(1.2, 0.8))
    # Time axis in ms: one point per time bin of width config['dt'].
    t_plot = np.arange(activity.shape[1]) * config['dt']
    plt.plot(t_plot, activity.mean(axis=0).mean(axis=-1))
analysis_average_activity(activity, info, config)
# -
def get_conditions(info):
    """Return the columns of `info` with 2-4 unique values.

    Such columns are treated as task conditions worth splitting plots by.
    Columns whose values cannot be tallied (unhashable) are skipped.
    """
    selected = []
    for col in info.columns:
        try:
            num_unique = len(pd.unique(info[col]))
        except TypeError:
            # e.g. columns holding lists/arrays — cannot compute uniques.
            continue
        if 1 < num_unique < 5:
            selected.append(col)
    return selected
# +
def analysis_activity_by_condition(activity, info, config):
    """Plot trial-averaged activity separately for each task-condition value."""
    conditions = get_conditions(info)
    for condition in conditions:
        values = pd.unique(info[condition])
        plt.figure(figsize=(1.2, 0.8))
        t_plot = np.arange(activity.shape[1]) * config['dt']
        for value in values:
            # Average over the trials that share this condition value.
            a = activity[info[condition] == value]
            plt.plot(t_plot, a.mean(axis=0).mean(axis=-1), label=str(value))
        plt.legend(title=condition, loc='center left', bbox_to_anchor=(1.0, 0.5))
analysis_activity_by_condition(activity, info, config)
# +
def analysis_example_units_by_condition(activity, info, config):
    """For a couple of example units, plot their activity split by condition."""
    conditions = get_conditions(info)
    if len(conditions) < 1:
        return
    example_ids = np.array([0, 1])  # indices of the units to show
    for example_id in example_ids:
        example_activity = activity[:, :, example_id]
        # One subplot per condition, shared time axis.
        # NOTE(review): with a single condition, plt.subplots returns one Axes
        # and axes[i] would fail — confirm conditions is always >= 2 here.
        fig, axes = plt.subplots(
            len(conditions), 1, figsize=(1.2, 0.8 * len(conditions)),
            sharex=True)
        for i, condition in enumerate(conditions):
            ax = axes[i]
            values = pd.unique(info[condition])
            t_plot = np.arange(activity.shape[1]) * config['dt']
            for value in values:
                # Trial-average the example unit within this condition value.
                a = example_activity[info[condition] == value]
                ax.plot(t_plot, a.mean(axis=0), label=str(value))
            ax.legend(title=condition, loc='center left', bbox_to_anchor=(1.0, 0.5))
            ax.set_ylabel('Activity')
            if i == len(conditions) - 1:
                ax.set_xlabel('Time (ms)')
            if i == 0:
                ax.set_title('Unit {:d}'.format(example_id + 1))
analysis_example_units_by_condition(activity, info, config)
# +
def analysis_pca_by_condition(activity, info, config):
    """Project condition-averaged activity onto the first two PCs and plot."""
    # Reshape activity to (N_trial x N_time, N_neuron)
    activity_reshape = np.reshape(activity, (-1, activity.shape[-1]))
    # Fit PCA on all timepoints of all trials pooled together.
    pca = PCA(n_components=2)
    pca.fit(activity_reshape)
    conditions = get_conditions(info)
    for condition in conditions:
        values = pd.unique(info[condition])
        fig = plt.figure(figsize=(2.5, 2.5))
        ax = fig.add_axes([0.2, 0.2, 0.7, 0.7])
        for value in values:
            # Get relevant trials, and average across them
            a = activity[info[condition] == value].mean(axis=0)
            a = pca.transform(a)  # (N_time, N_PC)
            # Trajectory in PC space over the course of a trial.
            plt.plot(a[:, 0], a[:, 1], label=str(value))
        plt.legend(title=condition, loc='center left', bbox_to_anchor=(1.0, 0.5))
        plt.xlabel('PC 1')
        plt.ylabel('PC 2')
analysis_pca_by_condition(activity, info, config)
| training/auto_notebooks/supervised/MotorTiming-v0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import psycopg2 as pg
import pandas.io.sql as psql
# Pull the simulated survival-chance table from the local Postgres instance.
# NOTE(review): "<PASSWORD>" is a scrubbed placeholder — supply real
# credentials via environment/config; never commit them.
connection = pg.connect("host=localhost dbname=replicator user=spark password=<PASSWORD>")
df = psql.read_sql('SELECT * FROM seedsurvivalchancedf', connection)
#url="https://raw.githubusercontent.com/bekisz/alakka/master/output/Replicator_seedSurvivalChance.csv/part-00000-d0328156-909c-4570-9450-145790d2d19e-c000.csv"
#df=pd.read_csv(url, sep=',',header=0)
# df=pd.read_csv('/Users/szabolcsbeki/Documents/GitHub/alakka/output/GaltonWatson_seedSurvivalChance.csv/part-00000-13089a2a-5658-412f-af50-8f54a8d009b8-c000.csv', sep=',',header=0)
# Slice: resilience == 0.5 with no mutation (other slices kept for reference).
#df0= df.loc[df.seedResilience==0.0,:]
df05= df.loc[(df.seedResilience==0.5) & (df.seedMutationProbability==0.0),:]
#df08= df.loc[df.seedResilience==0.8,:]
#df09= df.loc[df.seedResilience==0.9,:]
#df099= df.loc[df.seedResilience==0.99,:]
# Survival probability vs. resource-acquisition fitness for the chosen slice.
fig = plt.figure(figsize=(20,10))
fig.subplots_adjust(top=0.8)
ax1 = fig.add_subplot(111)
ax1.set_ylabel('Probability')
ax1.set_xlabel('seedResourceAcquisitionFitness - Seed Resource Acquistion Fitness')
#ax1.set_title('a sine wave')
#ax1.plot(df0['seedResourceAcquisitionFitness'], df0["seedSurvivalChance"], label='Probability of Eternal Survival', color='red', linewidth=3)
#ax1.plot(df0['seedResourceAcquisitionFitness'], df0["seedSurvivalChance"]+df0['error'], label='99% Confidence Interval - High', color='coral', linewidth=1)
#ax1.plot(df0['seedResourceAcquisitionFitness'], df0["seedSurvivalChance"]-df0['error'], label='99% Confidence Interval - Low', color='coral', linewidth=1)
ax1.plot(df05['seedResourceAcquisitionFitness'], df05["seedSurvivalChance"], label='Probability of Eternal Survival', color='orange', linewidth=3)
#ax1.plot(df08['seedResourceAcquisitionFitness'], df08["seedSurvivalChance"], label='Probability of Eternal Survival', color='yellow', linewidth=3)
#ax1.plot(df09['seedResourceAcquisitionFitness'], df09["seedSurvivalChance"], label='Probability of Eternal Survival', color='green', linewidth=3)
#ax1.plot(df099['seedResourceAcquisitionFitness'], df099["seedSurvivalChance"], label='Probability of Eternal Survival', color='blue', linewidth=3)
ax1.legend()
plt.show()
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Population-by-generation curve with a 99% confidence band, restricted to
# seedResourceAcquisitionFitness == 1.2.
url="https://raw.githubusercontent.com/bekisz/alakka/master/output/GaltonWatson_seedSurvivalChanceByTurn.csv/part-00000-8000711b-d0c1-4df2-b975-e173775f2d14-c000.csv"
#dfPop=pd.read_csv(url)
# Reuses the `connection` opened in the first cell of this notebook.
dfPop = psql.read_sql('SELECT * FROM seedpopulationbyturndf', connection)
dfPop=dfPop[dfPop['seedResourceAcquisitionFitness'] == 1.2]
fig = plt.figure(figsize=(20,10))
fig.subplots_adjust(top=0.8)
ax1 = fig.add_subplot(111)
ax1.set_ylabel('Population')
ax1.set_xlabel('Turn - Generation')
ax1.plot(dfPop['turn'], dfPop["seedPopulation"], label='Average of Seed Descendant Population', color='blue', linewidth=3)
ax1.plot(dfPop['turn'], dfPop["seedPopulation"]+dfPop['error'], label='99% Confidence Interval - High', color='lightblue', linewidth=1)
ax1.plot(dfPop['turn'], dfPop["seedPopulation"]-dfPop['error'], label='99% Confidence Interval - Low', color='lightblue', linewidth=1)
ax1.legend()
plt.show()
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
# 3D surface: survival chance as a function of resilience and
# resource-acquisition fitness.
url="https://raw.githubusercontent.com/bekisz/alakka/master/output/Replicator_seedSurvivalChance.csv/part-00000-d0328156-909c-4570-9450-145790d2d19e-c000.csv"
#dfPop=pd.read_csv(url)
# NOTE(review): this cell reads `seedpopulationbyturndf` but plots
# resilience / seedSurvivalChance columns — confirm the intended table.
dfPop = psql.read_sql('SELECT * FROM seedpopulationbyturndf', connection)
#dfPop=dfPop[dfPop['lambda'] == 1.2]
fig = plt.figure(figsize=(20,10))
fig.subplots_adjust(top=1)
ax1 = fig.add_subplot(111, projection='3d')
#ax1 = fig.gca(projection='3d')
ax1.set_xlabel('resilience')
ax1.set_ylabel('seedResourceAcquisitionFitness')
ax1.set_zlabel('seedSurvivalChance')
ax1.plot_trisurf(dfPop.resilience, dfPop["seedResourceAcquisitionFitness"], dfPop['seedSurvivalChance'], linewidth=0.2,
                 antialiased=True,shade=True,cmap=plt.cm.Spectral)
# Fixed camera angle: 35 deg elevation, 250 deg azimuth.
ax1.view_init(35, 250)
#ax1.legend()
plt.show()
# -
| src/main/python/ReplictorExperiment.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Convert Dataset Formats
#
# This recipe demonstrates how to use FiftyOne to convert datasets on disk between common formats.
# ## Requirements
#
# This notebook contains bash commands. To run it as a notebook, you must install the [Jupyter bash kernel](https://github.com/takluyver/bash_kernel) via the command below.
#
# Alternatively, you can just copy + paste the code blocks into your shell.
pip install bash_kernel
python -m bash_kernel.install
# In this recipe we'll use the [FiftyOne Dataset Zoo](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/zoo_datasets.html) to download some open source datasets to work with.
#
# Specifically, we'll need [TensorFlow](https://www.tensorflow.org/) and [TensorFlow Datasets](https://www.tensorflow.org/datasets) installed to [access the datasets](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/zoo_datasets.html#customizing-your-ml-backend):
# See https://www.tensorflow.org/install for more install options
pip install tensorflow
pip install tensorflow-datasets
# ## Download datasets
#
#
# Download the test split of the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) from the [FiftyOne Dataset Zoo](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/zoo_datasets.html) using the command below:
# Download the test split of CIFAR-10
fiftyone zoo download cifar10 --split test
# Download the validation split of the [KITTI dataset]( http://www.cvlibs.net/datasets/kitti) from the [FiftyOne Dataset Zoo](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/zoo_datasets.html) using the command below:
# Download the validation split of KITTI
fiftyone zoo download kitti --split validation
# ## The fiftyone convert command
# The [FiftyOne CLI](https://voxel51.com/docs/fiftyone/cli/index.html) provides a number of utilities for importing and exporting datasets in a variety of common (or custom) formats.
#
# Specifically, the `fiftyone convert` command provides a convenient way to convert datasets on disk between formats by specifying the [fiftyone.types.Dataset](https://voxel51.com/docs/fiftyone/api/fiftyone.types.html#fiftyone.types.dataset_types.Dataset) type of the input and desired output.
#
# FiftyOne provides a collection of [builtin types](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#supported-formats) that you can use to read/write datasets in common formats out-of-the-box:
# <div class="convert-recipes-table">
#
# | Dataset format | Import Supported? | Export Supported? | Conversion Supported? |
# | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | ----------------- | --------------------- |
# | [ImageDirectory](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#imagedirectory) | ✓ | ✓ | ✓ |
# | [FiftyOneImageClassificationDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#fiftyoneimageclassificationdataset) | ✓ | ✓ | ✓ |
# | [ImageClassificationDirectoryTree](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#imageclassificationdirectorytree) | ✓ | ✓ | ✓ |
# | [TFImageClassificationDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#tfimageclassificationdataset) | ✓ | ✓ | ✓ |
# | [FiftyOneImageDetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#fiftyoneimagedetectiondataset) | ✓ | ✓ | ✓ |
# | [COCODetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#cocodetectiondataset) | ✓ | ✓ | ✓ |
# | [VOCDetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#vocdetectiondataset) | ✓ | ✓ | ✓ |
# | [KITTIDetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#kittidetectiondataset) | ✓ | ✓ | ✓ |
# | [TFObjectDetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#tfobjectdetectiondataset) | ✓ | ✓ | ✓ |
# | [CVATImageDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#cvatimagedataset) | ✓ | ✓ | ✓ |
# | [FiftyOneImageLabelsDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#fiftyoneimagelabelsdataset) | ✓ | ✓ | ✓ |
# | [BDDDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#bdddataset) | ✓ | ✓ | ✓ |
#
# </div>
# In addition, you can define your own [custom dataset types](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#custom-formats) to read/write datasets in your own formats.
#
# The usage of the `fiftyone convert` command is as follows:
fiftyone convert -h
# ## Convert CIFAR-10 dataset
# When you downloaded the test split of the CIFAR-10 dataset above, it was written to disk as a dataset in [fiftyone.types.FiftyOneImageClassificationDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#fiftyoneimageclassificationdataset) format.
#
# You can verify this by printing information about the downloaded dataset:
fiftyone zoo info cifar10
# The snippet below uses `fiftyone convert` to convert the test split of the CIFAR-10 dataset to [fiftyone.types.ImageClassificationDirectoryTree](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#imageclassificationdirectorytree) format, which stores classification datasets on disk in a directory tree structure with images organized per-class:
#
# ```
# <dataset_dir>
# ├── <classA>/
# │ ├── <image1>.<ext>
# │ ├── <image2>.<ext>
# │ └── ...
# ├── <classB>/
# │ ├── <image1>.<ext>
# │ ├── <image2>.<ext>
# │ └── ...
# └── ...
# ```
# +
INPUT_DIR=$(fiftyone zoo find cifar10 --split test)
OUTPUT_DIR=/tmp/fiftyone/cifar10-dir-tree
fiftyone convert \
--input-dir ${INPUT_DIR} --input-type fiftyone.types.FiftyOneImageClassificationDataset \
--output-dir ${OUTPUT_DIR} --output-type fiftyone.types.ImageClassificationDirectoryTree
# -
# Let's verify that the conversion happened as expected:
ls -lah /tmp/fiftyone/cifar10-dir-tree/
ls -lah /tmp/fiftyone/cifar10-dir-tree/airplane/ | head
# Now let's convert the classification directory tree to [TFRecords](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#tfimageclassificationdataset) format!
# +
INPUT_DIR=/tmp/fiftyone/cifar10-dir-tree
OUTPUT_DIR=/tmp/fiftyone/cifar10-tfrecords
fiftyone convert \
--input-dir ${INPUT_DIR} --input-type fiftyone.types.ImageClassificationDirectoryTree \
--output-dir ${OUTPUT_DIR} --output-type fiftyone.types.TFImageClassificationDataset
# -
# Let's verify that the conversion happened as expected:
ls -lah /tmp/fiftyone/cifar10-tfrecords
# ## Convert KITTI dataset
# When you downloaded the validation split of the KITTI dataset above, it was written to disk as a dataset in [fiftyone.types.FiftyOneImageDetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/dataset_creation/datasets.html#fiftyoneimagedetectiondataset) format.
#
# You can verify this by printing information about the downloaded dataset:
fiftyone zoo info kitti
# The snippet below uses `fiftyone convert` to convert the validation split of the KITTI dataset to [fiftyone.types.COCODetectionDataset](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#cocodetectiondataset) format, which writes the dataset to disk with annotations in [COCO format](https://cocodataset.org/#home).
# +
INPUT_DIR=$(fiftyone zoo find kitti --split validation)
OUTPUT_DIR=/tmp/fiftyone/kitti-coco
fiftyone convert \
--input-dir ${INPUT_DIR} --input-type fiftyone.types.FiftyOneImageDetectionDataset \
--output-dir ${OUTPUT_DIR} --output-type fiftyone.types.COCODetectionDataset
# -
# Let's verify that the conversion happened as expected:
ls -lah /tmp/fiftyone/kitti-coco/
ls -lah /tmp/fiftyone/kitti-coco/data | head
cat /tmp/fiftyone/kitti-coco/labels.json | python -m json.tool 2> /dev/null | head -20
echo "..."
cat /tmp/fiftyone/kitti-coco/labels.json | python -m json.tool 2> /dev/null | tail -20
# Now let's convert from COCO format to [CVAT Image format](https://voxel51.com/docs/fiftyone/user_guide/export_datasets.html#cvatimageformat)!
# +
INPUT_DIR=/tmp/fiftyone/kitti-coco
OUTPUT_DIR=/tmp/fiftyone/kitti-cvat
fiftyone convert \
--input-dir ${INPUT_DIR} --input-type fiftyone.types.COCODetectionDataset \
--output-dir ${OUTPUT_DIR} --output-type fiftyone.types.CVATImageDataset
# -
# Let's verify that the conversion happened as expected:
ls -lah /tmp/fiftyone/kitti-cvat
cat /tmp/fiftyone/kitti-cvat/labels.xml | head -20
echo "..."
cat /tmp/fiftyone/kitti-cvat/labels.xml | tail -20
# ## Cleanup
#
# You can cleanup the files generated by this recipe by running the command below:
rm -rf /tmp/fiftyone
| docs/source/recipes/convert_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import open3d as o3d
from torch_cluster import fps
import torch
import os,sys
import numpy as np
# ## check the dictionary
# Inspect the contents of a saved ScanNet tensor dictionary: list its keys
# and the Python type stored under each one.
path = 'tmp/scene0000_00_vh_clean_2.pth'
data = torch.load(path)
print(data.keys())
for key, value in data.items():
    print(key, type(value))
dtype=torch.float
device=torch.device('cuda:0')
def mFPS(in_path, out_path, expect_num, device, dtype):
    """Downsample a point cloud to exactly `expect_num` points with farthest-point sampling.

    Reads a point cloud from `in_path`, runs torch-cluster FPS on `device`,
    trims the sampled index set to exactly `expect_num` entries, and writes
    the sparse cloud (points and colors) to `out_path`.

    Args:
        in_path: path of the input point cloud readable by open3d.
        out_path: destination path for the downsampled cloud.
        expect_num: exact number of points to keep.
        device: torch device used for the FPS computation.
        dtype: torch dtype for the point coordinates.
    """
    pcd = o3d.io.read_point_cloud(in_path)
    # open3d's Vector3dVector is not a tensor-compatible type; go through numpy.
    dense_pts = torch.tensor(np.asarray(pcd.points), dtype=dtype, device=device)
    n_pts = dense_pts.size()[0]
    ratio = expect_num / n_pts
    sparse_indice = fps(dense_pts, ratio=ratio, random_start=False)
    # fps() only approximates the requested ratio, so subsample the returned
    # indices evenly to hit expect_num exactly (endpoint=False keeps every
    # generated index strictly in range).
    if sparse_indice.size()[0] != expect_num:
        keep = np.linspace(0, sparse_indice.size()[0], expect_num, endpoint=False, dtype=int)
        sparse_indice = sparse_indice[list(keep)]
    sparse_indice = sparse_indice.tolist()
    assert len(sparse_indice) == expect_num
    # Build the sparse point cloud in place (the redundant empty-vector
    # assignments from the original were removed).
    sparse_points = np.asarray(pcd.points)[sparse_indice]
    sparse_colors = np.asarray(pcd.colors)[sparse_indice]
    pcd.points = o3d.utility.Vector3dVector(sparse_points)
    pcd.colors = o3d.utility.Vector3dVector(sparse_colors)
    # write (open3d returns False on failure rather than raising)
    assert o3d.io.write_point_cloud(out_path, pcd)
# # General ScanNet info
# - **Reconstructed surface mesh file (`*.ply`)**:
# Binary PLY format mesh with +Z axis in upright orientation.
#
# - **RGB-D sensor stream (`*.sens`)**:
# Compressed binary format with per-frame color, depth, camera pose and other data. See [ScanNet C++ Toolkit](#scannet-c-toolkit) for more information and parsing code. See [SensReader/python](SensReader/python) for a very basic python data exporter.
#
# - **Surface mesh segmentation file (`*.segs.json`)**:
# ```javascript
# {
# "params": { // segmentation parameters
# "kThresh": "0.0001",
# "segMinVerts": "20",
# "minPoints": "750",
# "maxPoints": "30000",
# "thinThresh": "0.05",
# "flatThresh": "0.001",
# "minLength": "0.02",
# "maxLength": "1"
# },
# "sceneId": "...", // id of segmented scene
# "segIndices": [1,1,1,1,3,3,15,15,15,15], // per-vertex index of mesh segment
# }
# ```
#
# - **Aggregated semantic annotation file (`*.aggregation.json`)**:
# ```javascript
# {
# "sceneId": "...", // id of annotated scene
# "appId": "...", // id + version of the tool used to create the annotation
# "segGroups": [
# {
# "id": 0,
# "objectId": 0,
# "segments": [1,4,3],
# "label": "couch"
# },
# ],
# "segmentsFile": "..." // id of the *.segs.json segmentation file referenced
# }
# ```
# [BenchmarkScripts/util_3d.py](BenchmarkScripts/util_3d.py) gives examples to parsing the semantic instance information from the `*.segs.json`, `*.aggregation.json`, and `*_vh_clean_2.ply` mesh file, with example semantic segmentation visualization in [BenchmarkScripts/3d_helpers/visualize_labels_on_mesh.py](BenchmarkScripts/3d_helpers/visualize_labels_on_mesh.py).
#
# - **2d annotation projections (`*_2d-label.zip`, `*_2d-instance.zip`, `*_2d-label-filt.zip`, `*_2d-instance-filt.zip`)**:
# Projection of 3d aggregated annotation of a scan into its RGB-D frames, according to the computed camera trajectory.
| fps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os import listdir
# -
# Derive a pet label for every image: drop the trailing index chunk of the
# underscore-separated filename, lowercase the remaining parts, and join them
# with spaces. Each label is wrapped in a single-element list.
filename_list = listdir('pet_images/')
results_dic = {}
values = [[' '.join(part.lower() for part in name.split('_')[:-1])]
          for name in filename_list]
name_list = zip(filename_list, values)
| intropyproject-classify-pet-images/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''.env'': venv)'
# language: python
# name: python3
# ---
# # CHOMP v2
# __Misc. Utilities__
#
# __by <NAME>__
# __Last updated November 30__, __2021__
# [https://github.com/seangilleran/chomp2](https://github.com/seangilleran/chomp2)
# ## Check Language (With Spacy)
# +
import os

import en_core_web_sm
from spacy.language import Language
from spacy_langdetect import LanguageDetector

path = "./corpus"


# Register the langdetect component with spaCy's factory registry so it can
# be attached to the pipeline by name.
@Language.factory("language_detector")
def language_detector(nlp, name):
    return LanguageDetector()


nlp = en_core_web_sm.load()
nlp.add_pipe("language_detector", last=True)

# Detect the language of every .txt file in the corpus and append
# `language,score,filename` rows to lang_check.csv.
for file in [f for f in os.listdir(path) if f.endswith(".txt")]:
    with open(os.path.join(path, file), "r", encoding="utf-8") as f:
        text = f.read()

    # Check language.
    lang = nlp(text)._.language
    language = lang["language"]
    score = lang["score"]

    print(f"{language.capitalize()} ({(score * 100):.0f}%): {file}")
    with open("lang_check.csv", "a", encoding="utf-8") as f:
        f.write(f"{language},{score},{file}\n")
# -
# ## Check Language (With Tag)
# +
import json
import os

path = "./meta"

# Read every collection JSON and append `language,file_name,file_id` rows to
# lang_check.csv for each file of each item.
for file in [f for f in os.listdir(path) if f.endswith(".json")]:
    with open(os.path.join(path, file), "r", encoding="utf-8") as f:
        collection = json.loads(f.read())

    # Open the output once per collection instead of once per row; the inner
    # loop variable is renamed from `file`, which shadowed the outer loop.
    with open("lang_check.csv", "a", encoding="utf-8") as out:
        for item in collection["items"]:
            lang = item["language"]
            for item_file in item["files"]:
                out.write(f"{lang},{item_file['name']},{item_file['id']}\n")
# -
# ## Convert PDF to TXT
# +
# TODO
# -
# ## Check Words vs. Enchant Spellcheck
# +
import os

from nltk.tokenize import word_tokenize
import regex as re

# Forward slashes work on every OS; the original ".\corpus" relied on Windows
# path handling and contained an invalid "\c" escape (and was inconsistent
# with the "./corpus" used by the other cells).
path = "./corpus"
words = set()
az = re.compile(r"^[a-zA-Z]+$")

files = [f for f in os.listdir(path) if f.endswith(".txt")]
print(f"Loading {len(files)} files...")
# Tokenize every corpus file and keep the purely alphabetic tokens.
for file in files:
    word_count = len(words)
    with open(os.path.join(path, file), "r", encoding="utf-8") as f:
        text = f.read()
    for word in word_tokenize(text):
        if not az.search(word):
            continue  # skip punctuation, numbers, mixed tokens
        words.add(word)
    print(f"Added {len(words) - word_count} words from {file}.")

print("\n** DONE! **")
print(f"Found {len(words)} unique words.")
# +
import enchant

d = enchant.Dict("en_US")

words = list(words)
words.sort()

# Spell-check every collected word; words enchant does not know are appended
# to words.txt for manual review.
print(f"Checking {len(words)} words...")
ok_count = 0
nf_count = 0
for word in words:
    print(f"(?) {word} ...", end="")
    if not d.check(word):
        # BUG FIX: the counters were never incremented in the original, so
        # `total` was always 0 and the final print raised ZeroDivisionError.
        nf_count += 1
        with open("words.txt", "a", encoding="utf-8") as f:
            f.write(f"{word}\n")
        print("Not Found")
        continue
    ok_count += 1
    print("OK")

total = ok_count + nf_count
print("\n** DONE! **")
# Guard against an empty word list and report the OK share as a percentage
# (the original formatted the raw 0..1 ratio with :.0f).
pct_ok = (ok_count / total * 100) if total else 0.0
print(f"Could not find spelling for {nf_count} words out of {total} (corpus {pct_ok:.0f}% ok).")
| _utilities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
# Make the repository root importable (this notebook lives one level down).
sys.path.append('/'.join(os.getcwd().split('/')[:-1]))

import numpy as np
import matplotlib.pyplot as plt
import matplotlib

from Agents import QLearningAgent, BayesianQAgent, PSRLAgent, MomentMatchingAgent, UbeNoUnrollAgent
from Environments import DeepSea, WideNarrow, PriorMDP
from utils import solve_tabular_continuing_PI, run_experiment, run_oracle_experiment, load_agent
from tqdm import tqdm_notebook as tqdm

# For saving figures and agents
if not os.path.exists('results'): os.mkdir('results')
if not os.path.exists('results/figures'): os.mkdir('results/figures')
if not os.path.exists('results/agent_logs'): os.mkdir('results/agent_logs')
fig_loc = 'results/figures/'

# Global font sizes for every figure produced in this notebook.
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.rc('legend', fontsize=16)
plt.rc('figure', titlesize=50)
# -
# # Environment constants
# +
# DeepSea constants
num_time_steps = 5000
save_every = num_time_steps // 100  # snapshot agent state every 50 steps (100 snapshots)

N = 4  # DeepSea grid size
# Reward noise scale shrinks exponentially with N.
delta = 1e-1 * np.exp(- N / 4)
# (mean, std) pairs for the environment's three reward types —
# NOTE(review): presumably (neutral, penalty, goal); confirm against DeepSea.
rew_params = ((0., delta), (-delta, delta), (1., delta))
env_params = {'N' : N,
              'episodic' : False,
              'rew_params' : rew_params}

# Define environment
environment = DeepSea(env_params)

# Number of PI steps and maximum buffer length (PSRL, UBE and MM only)
max_iter = 2 * N
max_buffer_length = N + 1
# -
# -
# # Q-Learning
# Agent parameters
agent_params = {'gamma' : 0.9,                       # discount factor
                'dither_mode' : 'epsilon-greedy',    # exploration strategy
                'dither_param' : 0.5,                # epsilon
                'lr' : 0.1,                          # Q-learning step size
                'Q0' : 0.0,                          # initial Q-value for all (s, a)
                'anneal_timescale' : float('inf'),   # inf => epsilon never annealed
                'sa_list' : environment.sa_list()}   # valid (state, action) pairs
# +
# for seed in tqdm(range(10)):
# # Define agent
# agent = QLearningAgent(agent_params)
# # Run experiment
# run_experiment(environment=environment,
# agent=agent,
# seed=seed,
# num_time_steps=num_time_steps,
# max_buffer_length=1,
# save_every=save_every)
# +
# Compare the QL agent's logged Q-estimates (seed 0) against the optimal Q*
# obtained by policy iteration on the true mean MDP.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)

agent = load_agent(environment, QLearningAgent(agent_params), seed=0)

# Collect estimates into shape (num_snapshots, 4 states, 2 actions).
means = []
for t in np.arange(num_time_steps // save_every):
    means.append([])
    for s in range(4):
        means[-1].append([])
        for a in range(2):
            means[-1][-1].append(agent.Qlog[t][s][a])
means = np.array(means)

fig = plt.figure(figsize=(10, 6))
actions = ['left', 'right']
leg_names = ['$Q_{\mathbf{s}, \mathbf{a}}$', '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 2
# One subplot per (state, action): columns are states, rows are actions.
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        # x-axis is the true time step: snapshots were taken every save_every steps.
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color='black')
        leg[1], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([0, 2])
        plt.yticks([0, 1, 2])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.xlim([0, num_time_steps])
        plt.tight_layout()
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$Q$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
        plt.ylim([0, 2.2])
fig.suptitle('QL $Q$-estimates on DeepSea ($\epsilon$ = {}, $N$ = {})'.format(agent.dither_param, N), fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=2, labelspacing=0., fontsize=20)
fig.subplots_adjust(top=0.86, bottom=0.25)
save_name = 'ql-{}-qestimates-deepsea-{}'.format(agent.dither_param, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of QL vs an oracle over 10 seeds (mean +/- 1 std).
agent_rs, oracle_rs = [], []
for seed in range(10):
    # Define agent
    agent = load_agent(environment, QLearningAgent(agent_params), seed=seed)
    agent_rs.append(agent.train_r)
    # Index [2] of the oracle run is its per-step reward sequence.
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)

agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
means = np.mean(regrets, axis=0)
stds = np.var(regrets, axis=0)**0.5

plt.figure(figsize=(6, 6))
plt.plot(means, color='k')
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color='k', alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
plt.title('QL regret on DeepSea\n($\epsilon$ = {}, $N$ = {})'.format(agent.dither_param, N),
          fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.ylim([0, 600])
save_name = 'ql-{}-regret-deepsea-{}'.format(agent.dither_param, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # Bayesian Q-Learning
# Agent parameters
agent_params = {'gamma' : 0.9,                      # discount factor
                'mu0' : 0.0,                        # normal-gamma prior mean
                'lamda' : 4.0,                      # prior precision scaling
                'alpha' : 3.0,                      # prior shape
                'beta' : 3.0,                       # prior rate
                'num_mixture_samples' : 1000,       # samples for the posterior mixture update
                'sa_list' : environment.sa_list()}  # valid (state, action) pairs

# Train BQL on DeepSea for 10 seeds; logs are written to results/agent_logs.
for seed in tqdm(range(10)):
    # Define agent
    agent = BayesianQAgent(agent_params)

    # Run experiment
    run_experiment(environment=environment,
                   agent=agent,
                   seed=seed,
                   num_time_steps=num_time_steps,
                   max_buffer_length=1,
                   save_every=save_every)
# +
# Plot BQL's normal-gamma posterior over Q-values against the optimal Q*.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)

# BUG FIX: the original loaded `seed=seed`, i.e. whatever the previous
# training loop left behind (9); every sibling cell inspects seed 0.
agent = load_agent(environment, BayesianQAgent(agent_params), seed=0)

# Posterior mean and marginal std of mu_z per snapshot/(state, action).
means, stds = [], []
for t in np.arange(num_time_steps // save_every):
    means.append([])
    stds.append([])
    for s in range(4):
        means[-1].append([])
        stds[-1].append([])
        for a in range(2):
            mu, lamda, alpha, beta = agent.Qpost_log[t][s][a]
            var = beta / (lamda * (alpha - 1))
            means[-1][-1].append(mu)
            stds[-1][-1].append(var**0.5)
means, stds = np.array(means), np.array(stds)

fig = plt.figure(figsize=(10, 6))
actions = ['left', 'right']
leg_names = ['$\mathbb{E}_{\\theta_{\mathcal{Z}}}[\mu_z]$',
             'Var$_{\\theta_{\mathcal{Z}}}[\mu_z]^{1/2}$',
             '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 3
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color='purple')
        leg[1] = plt.fill_between(np.arange(len(means[:, s, a])) * save_every,
                                  means[:, s, a] + stds[:, s, a],
                                  means[:, s, a] - stds[:, s, a],
                                  color='purple', alpha=0.2)
        leg[2], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([0, 2])
        plt.yticks([0, 1, 2])
        # BUG FIX: the x-data runs to num_time_steps (indices * save_every),
        # but the original clipped the axis at len(means) == 100.
        plt.xlim([0, num_time_steps])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.tight_layout()
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$\mu_z$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
        plt.ylim([0, 2.2])
fig.suptitle('BQL posterior on DeepSea ($N$ = {})'.format(N), fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=3, labelspacing=0., fontsize=20)
plt.tight_layout(w_pad=1)
fig.subplots_adjust(top=0.86, bottom=0.25)
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'bql-{}-{}-{}-{}-posterior-deepsea-{}'.format(mu0, lamda, alpha, beta, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of BQL vs an oracle over 10 seeds (mean +/- 1 std).
color = 'purple'

agent_rs, oracle_rs = [], []
for seed in range(10):
    # Define agent
    agent = load_agent(environment, BayesianQAgent(agent_params), seed=seed)
    agent_rs.append(agent.train_r)
    # Index [2] of the oracle run is its per-step reward sequence.
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)

agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
means = np.mean(regrets, axis=0)
stds = np.var(regrets, axis=0)**0.5

plt.figure(figsize=(6, 6))
plt.plot(means, color=color)
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color=color, alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
plt.title('BQL regret on DeepSea\n($N$ = {})'.format(N), fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.xticks([0, num_time_steps // 2, num_time_steps])
plt.ylim([0, 600])
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'bql-{}-{}-{}-{}-regret-deepsea-{}'.format(mu0, lamda, alpha, beta, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # PSRL
# Agent parameters
agent_params = {'gamma' : 0.9,                      # discount factor
                'kappa' : 1.0,                      # Dirichlet prior concentration over transitions
                'mu0' : 0.0,                        # normal-gamma prior mean for rewards
                'lamda' : 4.0,                      # prior precision scaling
                'alpha' : 3.0,                      # prior shape
                'beta' : 3.0,                       # prior rate
                'max_iter' : max_iter,              # PI iterations per sampled MDP
                'sa_list' : environment.sa_list()}  # valid (state, action) pairs

# Train PSRL on DeepSea for 10 seeds; logs are written to results/agent_logs.
for seed in tqdm(range(10)):
    # Define agent
    agent = PSRLAgent(agent_params)

    # Run experiment
    run_experiment(environment=environment,
                   agent=agent,
                   seed=seed,
                   num_time_steps=num_time_steps,
                   max_buffer_length=max_buffer_length,
                   save_every=save_every)
# +
# Plot PSRL's implied posterior over optimal Q-values: for each snapshot,
# sample 100 MDPs from the logged posterior, solve each with PI, and show
# the mean +/- 1 std of the resulting Q* against the true Q*.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)

agent = load_agent(environment, PSRLAgent(agent_params), seed=0)

Qs = []
for t in np.arange(num_time_steps // save_every):
    Qs.append([])
    for i in range(100):
        # Restore the posterior logged at snapshot t before sampling.
        agent.Ppost, agent.Rpost = agent.Ppost_log[t], agent.Rpost_log[t]
        P, R = agent.sample_posterior()
        pi, Q_ = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
        Qs[-1].append(Q_)
Qs = np.array(Qs)
means = np.mean(Qs, axis=1)
stds = np.var(Qs, axis=1)**0.5

# Recompute the true Q* (P, R were overwritten by the sampling loop above).
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)

fig = plt.figure(figsize=(10, 6))
color = 'orange'
actions = ['left', 'right']
leg_names = ['$\mathbb{E}_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}~[\hat{Q}^*_{\mathbf{s}, \mathbf{a}}]$',
             'Var$_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}[\hat{Q}^*_{\mathbf{s}, \mathbf{a}}]^{1/2}$',
             '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 3
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color=color)
        leg[1] = plt.fill_between(np.arange(len(means[:, s, a])) * save_every,
                                  means[:, s, a] + stds[:, s, a],
                                  means[:, s, a] - stds[:, s, a],
                                  color=color, alpha=0.2)
        leg[2], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([0, 2])
        plt.yticks([0, 1, 2])
        # BUG FIX: the x-data runs to num_time_steps (indices * save_every),
        # but the original clipped the axis at len(means) == 100.
        plt.xlim([0, num_time_steps])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.tight_layout()
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$\hat{Q}^*$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
        plt.ylim([0, 2.2])
fig.suptitle('PSRL posterior on DeepSea ($N$ = {})'.format(N), fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=3, labelspacing=0., fontsize=20)
plt.tight_layout(w_pad=1)
fig.subplots_adjust(top=0.86, bottom=0.25)
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'psrl-{}-{}-{}-{}-posterior-deepsea-{}'.format(mu0, lamda, alpha, beta, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of PSRL vs an oracle over 10 seeds (mean +/- 1 std).
color = 'orange'
agent_rs, oracle_rs = [], []
for seed in range(10):
    # Define agent
    agent = PSRLAgent(agent_params)
    agent = load_agent(environment, agent, seed=seed)
    agent_rs.append(agent.train_r)
    # Index [2] of the oracle run is its per-step reward sequence.
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)

agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
means = np.mean(regrets, axis=0)
stds = np.var(regrets, axis=0)**0.5

plt.figure(figsize=(6, 6))
plt.plot(means, color=color)
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color=color, alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
plt.title('PSRL regret on DeepSea\n($N =$ {})'.format(N), fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.xticks([0, num_time_steps // 2, num_time_steps])
plt.ylim([0, 600])
mu0, lamda, alpha, beta = agent.mu0, agent.lamda, agent.alpha, agent.beta
save_name = 'psrl-{}-{}-{}-{}-regret-deepsea-{}'.format(mu0, lamda, alpha, beta, N).replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # UBE
# Agent parameters
agent_params = {'gamma' : 0.9,                                     # discount factor
                'kappa' : 1.0,                                     # Dirichlet prior concentration
                'mu0' : 0.0,                                       # normal-gamma prior mean
                'lamda' : 4.0,                                     # prior precision scaling
                'alpha' : 3.0,                                     # prior shape
                'beta' : 3.0,                                      # prior rate
                'Rmax' : environment.get_mean_P_and_R()[1].max(),  # reward upper bound for the UBE
                'max_iter' : max_iter,                             # PI iterations
                'zeta' : 1.0,                                      # uncertainty-bonus scale
                'num_dyn_samples' : 100,                           # dynamics samples per update
                'sa_list' : environment.sa_list()}                 # valid (state, action) pairs

# Train UBE (zeta = 1.0) on DeepSea for 10 seeds.
for seed in tqdm(range(10)):
    # Define agent
    agent = UbeNoUnrollAgent(agent_params)

    # Run experiment
    run_experiment(environment=environment,
                   agent=agent,
                   seed=seed,
                   num_time_steps=num_time_steps,
                   max_buffer_length=max_buffer_length,
                   save_every=save_every)
# +
# Plot UBE's logged Q-value means with the uncertainty-Bellman std band
# against the optimal Q* from PI on the true mean MDP.
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)

agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=0)

means, stds = [], []
for t in np.arange(num_time_steps // save_every):
    means.append(agent.Qmu_log[t])
    stds.append(agent.Qvar_log[t]**0.5)
means = np.array(means)
stds = np.array(stds)

fig = plt.figure(figsize=(10, 6))
color = 'green'
actions = ['left', 'right']
leg_names = ['$\mathbb{E}_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}[\mu_{z_{\mathbf{s}, \mathbf{a}}}]$',
             '$u^{1/2}_{\mathbf{s}, \mathbf{a}}$',
             '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 3
s, a = 0, 0
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color=color)
        leg[1] = plt.fill_between(np.arange(len(means[:, s, a])) * save_every,
                                  means[:, s, a] + stds[:, s, a],
                                  means[:, s, a] - stds[:, s, a],
                                  color=color, alpha=0.2)
        leg[2], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([-8, 8])
        plt.yticks([-8, 0, 8])
        # BUG FIX: the x-data runs to num_time_steps (indices * save_every),
        # but the original clipped the axis at len(means) == 100.
        plt.xlim([0, num_time_steps])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.tight_layout()
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$\mu_z$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
fig.suptitle('UBE posterior on DeepSea ($\zeta =$ {}, $N =$ {})'.format(agent.zeta, N), fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=3, labelspacing=0., fontsize=20)
fig.subplots_adjust(top=0.86, bottom=0.25)
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-posterior-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of the trained UBE agent relative to an oracle,
# averaged over ten random seeds.
color = 'green'
agent_rs, oracle_rs = [], []
for seed in range(10):
    # Load the trained agent for this seed and record its training rewards
    agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=seed)
    agent_rs.append(agent.train_r)
    # element [2] of the returned tuple holds the oracle's per-step rewards
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)
agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
# regret at time t is the cumulative reward gap up to t
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
means = np.mean(regrets, axis=0)
# standard deviation across seeds (np.std replaces np.var(...)**0.5)
stds = np.std(regrets, axis=0)
plt.figure(figsize=(6, 6))
plt.plot(means, color=color)
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color=color, alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
# '\\zeta' (escaped backslash) avoids the invalid '\z' escape warning while
# keeping the embedded '\n' newline working
plt.title('UBE regret on DeepSea\n($\\zeta$ = {}, $N$ = {})'.format(agent.zeta, N),
          fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.xticks([0, num_time_steps // 2, num_time_steps])
plt.ylim([0, 600])
# encode the agent hyperparameters into the output file name
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-regret-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
# # UBE
# Agent parameters
# Hyperparameters for the UBE (uncertainty Bellman equation) agent;
# 'Rmax' is taken as the largest mean reward of the environment
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'Rmax' : environment.get_mean_P_and_R()[1].max(),
                'max_iter' : max_iter,
                'zeta' : 0.1,
                'num_dyn_samples' : 100,
                'sa_list' : environment.sa_list()}
# Train a fresh UBE agent for each of ten random seeds
for seed in tqdm(range(10)):
    # construct the agent, then run the training experiment for this seed
    agent = UbeNoUnrollAgent(agent_params)
    run_experiment(environment=environment, agent=agent, seed=seed,
                   num_time_steps=num_time_steps,
                   max_buffer_length=max_buffer_length,
                   save_every=save_every)
# +
# Recover the optimal Q-values of the mean MDP for reference
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
# reload the trained UBE agent for the first seed
agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=0)
# collect the logged posterior mean and standard deviation at each snapshot
means, stds = [], []
for t in np.arange(num_time_steps // save_every):
    means.append(agent.Qmu_log[t])
    stds.append(agent.Qvar_log[t]**0.5)
means = np.array(means)
stds = np.array(stds)
fig = plt.figure(figsize=(10, 6))
color = 'green'
actions = ['left', 'right']
leg_names = ['$\mathbb{E}_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}[\mu_{z_{\mathbf{s}, \mathbf{a}}}]$',
             '$u^{1/2}_{\mathbf{s}, \mathbf{a}}$',
             '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 3
s, a = 0, 0
# one subplot per (state, action) pair
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        # posterior mean with a one-standard-deviation band
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color=color)
        leg[1] = plt.fill_between(np.arange(len(means[:, s, a])) * save_every,
                                  means[:, s, a] + stds[:, s, a],
                                  means[:, s, a] - stds[:, s, a],
                                  color=color, alpha=0.2)
        # optimal Q-value of the mean MDP as a dashed reference line
        leg[2], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([-8, 8])
        plt.yticks([-8, 0, 8])
        # NOTE(review): the x data extend to num_time_steps (snapshot index
        # scaled by save_every) but this limit is only the snapshot count;
        # looks inconsistent with the xticks below unless save_every == 1 — verify
        plt.xlim([0, len(means[:, s, a])])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.tight_layout()
        # only label the leftmost column and bottom row of subplots
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$\mu_z$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
fig.suptitle('UBE posterior on DeepSea ($\zeta$ = {}, $N$ = {})'.format(agent.zeta, N), fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=3, labelspacing=0., fontsize=20)
fig.subplots_adjust(top=0.86, bottom=0.25)
# encode the agent hyperparameters into the output file name
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-posterior-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of the trained UBE agent relative to the oracle,
# averaged over ten random seeds
color = 'green'
agent_rs, oracle_rs = [], []
for seed in range(10):
    # Load the trained agent for this seed and record its training rewards
    agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=seed)
    agent_rs.append(agent.train_r)
    # element [2] of the returned tuple holds the oracle's per-step rewards
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)
agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
# regret at time t is the cumulative reward gap up to t
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
# mean and standard deviation of the regret across seeds
means = np.mean(regrets, axis=0)
stds = np.var(regrets, axis=0)**0.5
plt.figure(figsize=(6, 6))
plt.plot(means, color=color)
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color=color, alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
plt.title('UBE regret on DeepSea\n($\zeta$ = {}, $N$ = {})'.format(agent.zeta, N), fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.xticks([0, num_time_steps // 2, num_time_steps])
plt.ylim([0, 600])
# encode the agent hyperparameters into the output file name
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'ube-{}-{}-{}-{}-{}-regret-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Decompose the UBE local uncertainty into its reward-variance and Q_max terms
agent = load_agent(environment, UbeNoUnrollAgent(agent_params), seed=0)
var_rew, var_Qmax = agent.local_rew_var(agent.sample_dynamics(), each_term=True)
# states are 1-indexed on the x axis
x = np.arange(4) + 1
fig = plt.figure(figsize=(11, 5.5))
for a, a_label in enumerate(['left', 'right']):
    plt.subplot(2, 1, a + 1)
    # each quantity is drawn twice: a translucent fill, then a black outline
    plt.bar(x-0.1, var_rew[:, a], width=0.2, color='b', align='center',
            facecolor="darkolivegreen", alpha=0.2, label='Reward epistemic variance')
    plt.bar(x-0.1, var_rew[:, a], width=0.2, color='b', align='center',
            facecolor="None", edgecolor='k')
    plt.bar(x+0.1, var_Qmax[:, a], width=0.2, color='g', align='center',
            facecolor="c", alpha=0.2, label='$Q_{max}$ term')
    plt.bar(x+0.1, var_Qmax[:, a], width=0.2, color='g', align='center',
            facecolor="None", edgecolor='k')
    # log scale: the two terms can differ by orders of magnitude
    plt.gca().set_yscale('log')
    if a == 1:
        plt.xticks([1, 2, 3, 4])
        plt.xlabel('State', fontsize=20)
    else:
        plt.title('UBE local uncertainty terms on DeepSea ($t = 5000, N = 4$)', fontsize=24)
        plt.xticks([])
    plt.yticks([1e-3, 1e-1, 1e1, 1e3])
    plt.ylabel('Epist. unc.\n(a = {})'.format(str(a_label)), fontsize=20)
    plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig(fig_loc + 'ube-uncertainty.pdf', bbox_inches='tight')
plt.show()
# -
# # Moment Matching
# Agent constants
# Hyperparameters for the moment-matching agent; mirrors the UBE settings
# but with zeta = 1.0 and no 'Rmax' entry
# NOTE(review): confirm MomentMatchingAgent does not require 'Rmax'
agent_params = {'gamma' : 0.9,
                'kappa' : 1.0,
                'mu0' : 0.0,
                'lamda' : 4.0,
                'alpha' : 3.0,
                'beta' : 3.0,
                'max_iter' : max_iter,
                'zeta' : 1.0,
                'num_dyn_samples' : 100,
                'sa_list' : environment.sa_list()}
# Train a fresh moment-matching agent for each of ten random seeds
for seed in tqdm(range(10)):
    # Define agent
    agent = MomentMatchingAgent(agent_params)
    # Run experiment
    run_experiment(environment=environment,
                   agent=agent,
                   seed=seed,
                   num_time_steps=num_time_steps,
                   max_buffer_length=max_buffer_length,
                   save_every=save_every)
# +
# Recover the optimal Q-values of the mean MDP for reference
P, R = environment.get_mean_P_and_R()
pi, Q = solve_tabular_continuing_PI(P, R, gamma=0.9, max_iter=max_iter)
# reload the trained moment-matching agent for the first seed
agent = load_agent(environment, MomentMatchingAgent(agent_params), seed=0)
# collect the logged posterior mean and standard deviation at each snapshot
means, stds = [], []
for t in np.arange(num_time_steps // save_every):
    means.append(agent.mu_log[t])
    stds.append(agent.var_log[t]**0.5)
means = np.array(means)
stds = np.array(stds)
fig = plt.figure(figsize=(10, 6))
color = 'deepskyblue'
actions = ['left', 'right']
leg_names = ['$\mathbb{E}_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}[\mu_{z^*_{\mathbf{s}, \mathbf{a}}}]$',
             'Var$_{\\theta_{\mathcal{T}}, \\theta_{\mathcal{R}}}[\mu_{z^*_{\mathbf{s}, \mathbf{a}}}]^{1/2}$',
             '$Q^*_{\mathbf{s}, \mathbf{a}}$']
leg = [None] * 3
# one subplot per (state, action) pair
for s in range(4):
    for a in range(2):
        plt.subplot(2, 4, s + 4 * a + 1)
        # posterior mean with a one-standard-deviation band
        leg[0], = plt.plot(np.arange(len(means[:, s, a])) * save_every, means[:, s, a], color=color)
        leg[1] = plt.fill_between(np.arange(len(means[:, s, a])) * save_every,
                                  means[:, s, a] + stds[:, s, a],
                                  means[:, s, a] - stds[:, s, a],
                                  color=color, alpha=0.2)
        # optimal Q-value of the mean MDP as a dashed reference line
        leg[2], = plt.plot(np.arange(len(means[:, s, a])) * save_every,
                           np.ones(len(means[:, s, a])) * Q[s, a],
                           '--',
                           color='red')
        plt.ylim([0, 2])
        plt.yticks([0, 1, 2])
        # NOTE(review): x data extend to num_time_steps but this limit is only
        # the snapshot count; inconsistent with xticks unless save_every == 1 — verify
        plt.xlim([0, len(means[:, s, a])])
        plt.xticks([0, num_time_steps // 2, num_time_steps])
        plt.tight_layout()
        # only label the leftmost column and bottom row of subplots
        if s > 0:
            plt.yticks([])
        else:
            plt.ylabel('$\mu_z$', fontsize=20)
        if a == 0:
            plt.xticks([])
        else:
            plt.xlabel('t', fontsize=20)
        plt.title(r's = {}, {}'.format(s + 1, actions[a]), fontsize=15)
fig.suptitle('MM posterior on DeepSea ($\zeta$ = {}, $N$ = {})'.format(agent.zeta, N),
             fontsize=24)
plt.figlegend(leg, leg_names, loc = 'lower center', ncol=3, labelspacing=0., fontsize=20)
fig.subplots_adjust(top=0.86, bottom=0.25)
# encode the agent hyperparameters into the output file name
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'mm-{}-{}-{}-{}-{}-posterior-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# +
# Cumulative regret of the moment-matching agent relative to the oracle,
# averaged over ten random seeds
color = 'deepskyblue'
agent_rs, oracle_rs = [], []
for seed in range(10):
    # Load the trained agent for this seed and record its training rewards
    agent = load_agent(environment, MomentMatchingAgent(agent_params), seed=seed)
    agent_rs.append(agent.train_r)
    # element [2] of the returned tuple holds the oracle's per-step rewards
    oracle_r = run_oracle_experiment(environment=environment,
                                     seed=seed,
                                     gamma=0.9,
                                     num_time_steps=num_time_steps,
                                     num_PI_iter=max_iter)[2]
    oracle_rs.append(oracle_r)
agent_rs, oracle_rs = np.array(agent_rs), np.array(oracle_rs)
# regret at time t is the cumulative reward gap up to t
regrets = np.cumsum(oracle_rs - agent_rs, axis=-1)
means = np.mean(regrets, axis=0)
stds = np.var(regrets, axis=0)**0.5
plt.figure(figsize=(6, 6))
plt.plot(means, color=color)
plt.fill_between(np.arange(len(means)), means - stds, means + stds, color=color, alpha=0.2)
plt.locator_params(axis='y', nbins=4)
plt.locator_params(axis='x', nbins=4)
plt.title('MM regret on DeepSea\n($\zeta$ = {}, $N$ = {})'.format(agent.zeta, N), fontsize=24)
plt.xlabel('Time step $t$', fontsize=22)
plt.ylabel('Regret to oracle', fontsize=22)
plt.xlim([0, num_time_steps])
plt.xticks([0, num_time_steps // 2, num_time_steps])
plt.ylim([0, 600])
# encode the agent hyperparameters into the output file name
mu0, lamda, alpha, beta, zeta = agent.mu0, agent.lamda, agent.alpha, agent.beta, agent.zeta
save_name = 'mm-{}-{}-{}-{}-{}-regret-deepsea-{}'.format(mu0, lamda, alpha, beta, zeta, N)
save_name = save_name.replace('.', '_')
plt.savefig(fig_loc + save_name + '.pdf', bbox_inches='tight')
plt.show()
# -
| code/experiments/plot-posteriors-deepsea.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# name: python3
# ---
# ## Plot-ATL11-Tidal-Histograms
#
# Calculates histograms of ICESat-2 land ice elevation differences corrected for different tide models over Antarctic ice shelves
#
# #### Python Dependencies
# - [numpy: Scientific Computing Tools For Python](https://numpy.org)
# - [scipy: Scientific Tools for Python](https://docs.scipy.org/doc/)
# - [h5py: Python interface for Hierarchical Data Format 5 (HDF5)](https://h5py.org)
# - [pyproj: Python interface to PROJ library](https://pypi.org/project/pyproj/)
# - [matplotlib: Python 2D plotting library](http://matplotlib.org/)
#
# #### Program Dependencies
# - read_ICESat2_ATL11.py: reads ICESat-2 annual land ice height data files
# - time.py: utilities for calculating time operations
# #### Load necessary modules for running the notebook
# +
from __future__ import print_function
import sys
import os
import re
import pyproj
import logging
import datetime
import numpy as np
import scipy.stats
import matplotlib
# global matplotlib style settings for all figures in this notebook
matplotlib.rcParams['axes.linewidth'] = 2.0
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['Helvetica']
import matplotlib.pyplot as plt
import matplotlib.offsetbox
import ipywidgets as widgets
from icesat2_toolkit.read_ICESat2_ATL11 import read_HDF5_ATL11,read_HDF5_ATL11_pair
import icesat2_toolkit.time
# emit INFO-level log messages from the toolkit readers
logging.basicConfig(level=logging.INFO)
# -
# #### Get current list of available cycles
def cycles():
    """Return all currently-available ICESat-2 orbital cycle numbers as
    zero-padded strings (e.g. ['01', '02', ...]).

    The cycle count is estimated from the elapsed time between the first
    ATLAS data point and now, assuming 91-day repeat cycles.
    """
    # width of the zero-padded cycle number in ATL11 filenames
    # (the original variable was misleadingly named cycle_length)
    cycle_digits = 2
    # length of an ICESat-2 orbital repeat cycle in days
    cycle_days = 91
    # number of GPS seconds between the GPS epoch and ATLAS SDP epoch
    atlas_sdp_gps_epoch = 1198800018.0
    # number of GPS seconds since the GPS epoch for first ATLAS data point
    atlas_gps_start_time = atlas_sdp_gps_epoch + 24710205.39202261
    # GPS epoch and Unix epoch
    epoch1 = datetime.datetime(1980,1,6,0,0,0)
    epoch2 = datetime.datetime(1970,1,1,0,0,0)
    # get the total number of seconds since the start of ATLAS and now
    delta_time_epochs = (epoch2 - epoch1).total_seconds()
    atlas_UNIX_start_time = (atlas_gps_start_time - delta_time_epochs)
    present_time = datetime.datetime.now().timestamp()
    # divide total time by cycle length to get the maximum number of orbital cycles
    ncycles = np.ceil((present_time - atlas_UNIX_start_time)/(86400*cycle_days)).astype('i')
    return [str(c+1).zfill(cycle_digits) for c in range(ncycles)]
# #### Set working data directory and histogram parameters
# +
# regional plot parameters
# x and y limit (in projection)
# bounding boxes in Antarctic Polar Stereographic (EPSG:3031) meters
region_xlimit = {}
region_ylimit = {}
# Antarctica (AIS)
region_xlimit['AIS'] = (-3100000,3100000)
region_ylimit['AIS'] = (-2600000,2600000)
# Ronne/Filchner Ice Shelf
region_xlimit['FRIS'] = (-1535000,-520000)
region_ylimit['FRIS'] = (77500,1092500)
# Ross Ice Shelf
region_xlimit['RIS'] = (-740000,520000)
region_ylimit['RIS'] = (-1430000,-300000)
# Amery Ice Shelf
region_xlimit['AMIS'] = (1630000,2310000)
region_ylimit['AMIS'] = (530000,880000)
# Larsen-C Ice Shelf
region_xlimit['LCIS'] = (-2470000,-2050000)
region_ylimit['LCIS'] = (895000,1325000)
# Larsen-D Ice Shelf
region_xlimit['LDIS'] = (-2130000,-1595000)
region_ylimit['LDIS'] = (880000,1165000)
# George VI Ice Shelf
region_xlimit['G6IS'] = (-2230000,-1685000)
region_ylimit['G6IS'] = (320000,830000)
# Abbot Ice Shelf
region_xlimit['ABIS'] = (-2000000,-1800000)
region_ylimit['ABIS'] = (-460000,100000)
# Pine Island Ice Shelf
region_xlimit['PIIS'] = (-1695000,-1510000)
region_ylimit['PIIS'] = (-380000,-230000)
# Thwaites Glacier Tongue
region_xlimit['THWGT'] = (-1630000,-1480000)
region_ylimit['THWGT'] = (-525000,-370000)
# Dotson/Crosson Ice Shelf
region_xlimit['DCIS'] = (-1640000,-1460000)
region_ylimit['DCIS'] = (-715000,-525000)
# Wilkins Ice Shelf
region_xlimit['WLKIS'] = (-2180000,-1900000)
region_ylimit['WLKIS'] = (530000,795000)
# Wordie (Prospect) Ice Shelf
region_xlimit['WRDIS'] = (-2115000,-2042500)
region_ylimit['WRDIS'] = (830000,895000)
# Venable Ice Shelf
region_xlimit['VBLIS'] = (-1895000,-1800000)
region_ylimit['VBLIS'] = (22000,151000)
# set the directory with ICESat-2 data
dirText = widgets.Text(
    value=os.getcwd(),
    description='Directory',
    disabled=False
)
# set the ICESat-2 ATL11 data release
releaseDropdown = widgets.Dropdown(
    options=['001','002'],
    value='002',
    description='Release',
    disabled=False
)
# set the ICESat-2 start and end cycles
all_cycles = cycles()
cycleSelect = widgets.SelectionRangeSlider(
    options=all_cycles,
    index=(2,len(all_cycles)-1),
    description='Cycles',
    disabled=False
)
# set the ICESat-2 granule regions
granuleSelect = widgets.SelectMultiple(
    options=np.arange(1,15),
    value=[10,11,12],
    description='Granules',
    disabled=False
)
# set the region to calculate histograms
regions = region_xlimit.keys()
regionDropdown = widgets.Dropdown(
    options=regions,
    value='FRIS',
    description='Region',
    disabled=False
)
# set the differencing method for histograms
# AT: along-track differences between cycles; XT: crossover differences
methodDropdown = widgets.Dropdown(
    options=['AT','XT'],
    value='AT',
    description='Method',
    disabled=False
)
# display widgets for setting parameters
widgets.VBox([dirText,releaseDropdown,cycleSelect,granuleSelect,
              regionDropdown,methodDropdown])
# -
# #### Find indices of common reference points between two lists
# Determines which along-track points correspond with the across-track
def common_reference_points(XT, AT):
    """Return, for each across-track reference point in XT, the index of
    the matching reference point within the along-track array AT."""
    # collect the along-track index array matching each crossover point
    matched = []
    for point in XT:
        matched.append(np.flatnonzero(AT == point))
    # collapse the singleton match dimension into a flat index array
    return np.squeeze(matched)
# + tags=["outputPrepend"]
# get values from widgets
base_dir = os.path.expanduser(dirText.value)
RELEASE = releaseDropdown.value
CYCLES = cycleSelect.value
GRANULES = granuleSelect.value
REGION = regionDropdown.value
# read crossovers from ATL11 files
METHOD = methodDropdown.value
CROSSOVERS = (METHOD == 'XT')
# tide models to use
TIDE_MODELS = ['CATS2008','TPXO9-atlas-v3','GOT4.10','FES2014']
# height threshold (filter points below 0m elevation)
THRESHOLD = 0.0
# time threshold for crossover differences
DAYS = 10.0
# histogram parameters
w = 0.01
vmin,vmax=(-8,8)
b1 = np.arange(vmin,vmax+w,w)
b2 = (b1[1:] + b1[0:-1])/2.0
nbins = int((vmax-vmin)/w)
# total difference histogram for each tide model
hist = dict(Uncorrected=np.zeros((nbins)))
for m in TIDE_MODELS:
hist[m] = np.zeros((nbins))
# find ICESat-2 HDF5 files in the subdirectory for product and release
regex_track = '|'.join(['{0:04d}'.format(T) for T in range(1,1388)])
regex_granule = '|'.join(['{0:02d}'.format(G) for G in GRANULES])
# compile regular expression operator for extracting data from files
args = (regex_track,regex_granule,CYCLES[0],CYCLES[1],RELEASE)
regex_pattern = (r'(processed_)?(ATL\d{{2}})_({0})({1})_({2})({3})_'
r'({4})_(\d{{2}})(.*?).h5$')
rx = re.compile(regex_pattern.format(*args), re.VERBOSE)
# associated file format
file_format = '{0}_{1}_{2}_{3}{4}_{5}{6}_{7}_{8}{9}.h5'
# HDF5 group name for across-track data
XT = 'crossing_track_data'
# projections for converting lat/lon to polar stereographic
crs1 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
crs2 = pyproj.CRS.from_string("epsg:{0:d}".format(3031))
transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
# find all input ATL11 files
FILE1 = [os.path.join(base_dir,f) for f in os.listdir(base_dir)
if bool(rx.match(f))]
# total number of valid segments for differencing method
total_valid = 0
# for each file in the cycle
for f1 in sorted(FILE1):
    # extract parameters from file
    SUB,PRD,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX = rx.findall(f1).pop()
    # read ICESat-2 file
    try:
        mds1,attrs1,pairs1 = read_HDF5_ATL11(f1,
            CROSSOVERS=CROSSOVERS,
            ATTRIBUTES=True)
    except:
        # skip files that cannot be read
        continue
    # for each beam in the file
    for ptx in pairs1:
        # extract along-track and across-track variables
        ref_pt = {}
        latitude = {}
        longitude = {}
        delta_time = {}
        h_corr = {}
        quality_summary = {}
        tide_ocean = {m:{} for m in TIDE_MODELS}
        ib = {}
        groups = ['AT']
        # shape of along-track data
        n_points,n_cycles = mds1[ptx]['delta_time'].shape
        # along-track (AT) reference point, latitude, longitude and time
        ref_pt['AT'] = mds1[ptx]['ref_pt'].copy()
        latitude['AT'] = np.ma.array(mds1[ptx]['latitude'],
            fill_value=attrs1[ptx]['latitude']['_FillValue'])
        latitude['AT'].mask = (latitude['AT'] == latitude['AT'].fill_value)
        longitude['AT'] = np.ma.array(mds1[ptx]['longitude'],
            fill_value=attrs1[ptx]['longitude']['_FillValue'])
        longitude['AT'].mask = (longitude['AT'] == longitude['AT'].fill_value)
        delta_time['AT'] = np.ma.array(mds1[ptx]['delta_time'],
            fill_value=attrs1[ptx]['delta_time']['_FillValue'])
        delta_time['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)
        # corrected height
        h_corr['AT'] = np.ma.array(mds1[ptx]['h_corr'],
            fill_value=attrs1[ptx]['h_corr']['_FillValue'])
        h_corr['AT'].mask = (h_corr['AT'] == h_corr['AT'].fill_value)
        # quality summary
        quality_summary['AT'] = (mds1[ptx]['quality_summary'] == 0)
        # ocean corrections (filled in later from the per-model tide files)
        for m in TIDE_MODELS:
            tide_ocean[m]['AT'] = np.ma.zeros((n_points,n_cycles),
                fill_value=attrs1[ptx]['cycle_stats']['tide_ocean']['_FillValue'])
            tide_ocean[m]['AT'].mask = np.zeros((n_points,n_cycles),dtype=bool)
        # dynamic atmosphere (inverse barometer) correction
        ib['AT'] = np.ma.array(mds1[ptx]['cycle_stats']['dac'],
            fill_value=attrs1[ptx]['cycle_stats']['dac']['_FillValue'])
        ib['AT'].mask = (ib['AT'] == ib['AT'].fill_value)
        # if running ATL11 crossovers
        if CROSSOVERS:
            # add to group
            groups.append('XT')
            # shape of across-track data
            n_cross, = mds1[ptx][XT]['delta_time'].shape
            # across-track (XT) reference point
            ref_pt['XT'] = mds1[ptx][XT]['ref_pt'].copy()
            # across-track (XT) latitude, longitude and time
            latitude['XT'] = np.ma.array(mds1[ptx][XT]['latitude'],
                fill_value=attrs1[ptx][XT]['latitude']['_FillValue'])
            latitude['XT'].mask = (latitude['XT'] == latitude['XT'].fill_value)
            longitude['XT'] = np.ma.array(mds1[ptx][XT]['longitude'],
                fill_value=attrs1[ptx][XT]['longitude']['_FillValue'])
            longitude['XT'].mask = (longitude['XT'] == longitude['XT'].fill_value)
            delta_time['XT'] = np.ma.array(mds1[ptx][XT]['delta_time'],
                fill_value=attrs1[ptx][XT]['delta_time']['_FillValue'])
            delta_time['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)
            # corrected height
            h_corr['XT'] = np.ma.array(mds1[ptx][XT]['h_corr'],
                fill_value=attrs1[ptx][XT]['h_corr']['_FillValue'])
            h_corr['XT'].mask = (h_corr['XT'] == h_corr['XT'].fill_value)
            # quality summary
            quality_summary['XT'] = (mds1[ptx][XT]['atl06_quality_summary'] == 0)
            # ocean corrections
            for m in TIDE_MODELS:
                tide_ocean[m]['XT'] = np.ma.zeros((n_cross),
                    fill_value=attrs1[ptx][XT]['tide_ocean']['_FillValue'])
                tide_ocean[m]['XT'].mask = np.zeros((n_cross),dtype=bool)
            ib['XT'] = np.ma.array(mds1[ptx][XT]['dac'],
                fill_value=attrs1[ptx][XT]['dac']['_FillValue'])
            ib['XT'].mask = (ib['XT'] == ib['XT'].fill_value)
        # ice shelf mask
        a2 = (PRD,'ICE_SHELF','MASK',TRK,GRAN,SCYC,ECYC,RL,VERS,AUX)
        f2 = os.path.join(base_dir,file_format.format(*a2))
        # create data mask for ice shelves
        mds1[ptx]['subsetting'] = {}
        mds1[ptx]['subsetting'].setdefault('ice_shelf',
            np.zeros((n_points),dtype=bool))
        # check that mask file exists
        try:
            mds2,attr2 = read_HDF5_ATL11_pair(f2,ptx,
                SUBSETTING=True)
        except:
            # skip beams without an ice shelf mask file
            continue
        else:
            mds1[ptx]['subsetting']['ice_shelf'] = \
                mds2[ptx]['subsetting']['ice_shelf']
        # read height corrections from each tide model
        for m in TIDE_MODELS:
            # tide model
            a3 = (PRD,m,'TIDES',TRK,GRAN,SCYC,ECYC,RL,VERS,AUX)
            f3 = os.path.join(base_dir,file_format.format(*a3))
            # check that tide file exists
            try:
                mds3,attr3 = read_HDF5_ATL11_pair(f3,ptx,
                    CROSSOVERS=CROSSOVERS)
            except:
                # mask all values
                for group in groups:
                    tide_ocean[m][group].mask[:] = True
                continue
            else:
                tide_ocean[m]['AT'].data[:] = \
                    mds3[ptx]['cycle_stats']['tide_ocean']
                if CROSSOVERS:
                    tide_ocean[m]['XT'].data[:] = \
                        mds3[ptx][XT]['tide_ocean']
            # set masks and fill values
            for group,val in tide_ocean[m].items():
                val.mask[:] = (val.data == val.fill_value)
                val.data[val.mask] = val.fill_value
        #-- check method of differencing
        if (METHOD == 'AT'):
            # if running along-track differences
            difference_cycles = np.arange(n_cycles-1)
            n_diff = np.copy(n_points)
            # convert lat/lon to polar stereographic
            X,Y = transformer.transform(longitude['AT'],latitude['AT'])
            # run for all indices
            ref_indices = Ellipsis
        elif (METHOD == 'XT'):
            # if running crossovers
            difference_cycles = np.arange(n_cycles)
            n_diff = np.copy(n_cross)
            # convert lat/lon to polar stereographic
            X,Y = transformer.transform(longitude['XT'],latitude['XT'])
            # find mapping between crossover and along-track reference points
            ref_indices = common_reference_points(ref_pt['XT'], ref_pt['AT'])
        else:
            difference_cycles = []
        # for each cycle
        for cycle in difference_cycles:
            # fill value for invalid values
            fv = attrs1[ptx]['h_corr']['_FillValue']
            # copy annual land ice height variables
            h1 = np.ma.array(mds1[ptx]['h_corr'][ref_indices,cycle],
                fill_value=fv)
            if CROSSOVERS:
                h2 = np.ma.array(mds1[ptx][XT]['h_corr'][:],
                    fill_value=fv)
            else:
                h2 = np.ma.array(mds1[ptx]['h_corr'][:,cycle+1],
                    fill_value=fv)
            # create masks for height variables
            h1.mask = (h1.data == h1.fill_value)
            h2.mask = (h2.data == h2.fill_value)
            # # reference heights to geoid
            # h1 -= mds1[ptx]['ref_surf']['geoid_h']
            # h2 -= mds1[ptx]['ref_surf']['geoid_h'][ref_indices]
            # correct heights for ocean variability
            h1 -= ib['AT'][ref_indices,cycle]
            if CROSSOVERS:
                h2 -= ib['XT'][:]
            else:
                h2 -= ib['AT'][:,cycle+1]
            # calculate corrected height differences
            h_diff = np.ma.zeros((n_diff),fill_value=fv)
            # set masks for invalid points
            h_diff.mask = np.zeros((n_diff),dtype=bool)
            # check if data is valid and within bounds
            h_diff.mask |= (h1.mask | h2.mask)
            # NOTE(review): with vmin = -vmax the two conditions below are
            # identical; an asymmetric range would need a (h1-h2) < vmin check
            h_diff.mask |= (np.abs(h1 - h2) > np.abs(vmin)) | \
                (np.abs(h1 - h2) > np.abs(vmax))
            # check if tide model is valid
            for m in TIDE_MODELS:
                h_diff.mask |= tide_ocean[m]['AT'].mask[ref_indices,cycle]
                if CROSSOVERS:
                    h_diff.mask |= tide_ocean[m]['XT'].mask[:]
                else:
                    h_diff.mask |= tide_ocean[m]['AT'].mask[:,cycle+1]
            # check if IB correction is valid
            h_diff.mask |= ib['AT'].mask[:,cycle]
            if CROSSOVERS:
                h_diff.mask |= ib['XT'].mask[:]
            else:
                h_diff.mask |= ib['AT'].mask[:,cycle+1]
            # check if a low quality surface fit
            h_diff.mask |= np.logical_not(quality_summary['AT'][ref_indices,cycle])
            if CROSSOVERS:
                h_diff.mask |= np.logical_not(quality_summary['XT'][:])
            else:
                h_diff.mask |= np.logical_not(quality_summary['AT'][:,cycle+1])
            # check if not ice shelf
            subset_mask = mds1[ptx]['subsetting']['ice_shelf'][ref_indices]
            h_diff.mask |= np.logical_not(subset_mask)
            # check if below height threshold
            h_diff.mask |= (h1 <= THRESHOLD) | (h2 <= THRESHOLD)
            # check if points are within bounds of plot
            h_diff.mask |= (X < region_xlimit[REGION][0]) | \
                (X > region_xlimit[REGION][1]) | \
                (Y < region_ylimit[REGION][0]) | \
                (Y > region_ylimit[REGION][1])
            # check if crossover measurements are within time range
            if CROSSOVERS:
                # check versus threshold in days
                dt = (delta_time['XT'] - delta_time['AT'][ref_indices,cycle])
                h_diff.mask |= (np.abs(dt/86400.0) > DAYS)
                h_diff.mask |= delta_time['AT'].mask[ref_indices,cycle]
                h_diff.mask |= delta_time['XT'].mask[:]
            # calculate elevation histogram for beam
            if np.any(~h_diff.mask):
                # calculate height difference
                h_diff.data[:] = h2.data[:] - h1.data[:]
                # histogram using numpy
                hh,hb = np.histogram(h_diff.compressed(),bins=b1)
                # add to total uncorrected histogram
                hist['Uncorrected'] += hh.astype(np.float64)
                #-- calculate tide-corrected height differences
                for m in TIDE_MODELS:
                    to1 = np.copy(tide_ocean[m]['AT'][ref_indices,cycle])
                    if CROSSOVERS:
                        to2 = np.copy(tide_ocean[m]['XT'][:])
                    else:
                        to2 = np.copy(tide_ocean[m]['AT'][:,cycle+1])
                    # tide-corrected height difference
                    h_diff.data[:] = (h2 - to2) - (h1 - to1)
                    # histogram using numpy
                    hh,hb = np.histogram(h_diff.compressed(),bins=b1)
                    # add to total histogram
                    hist[m] += hh.astype(np.float64)
                # add to valid segments
                total_valid += np.count_nonzero(~h_diff.mask)
# +
# figure axes for output histogram plots
fig,ax = plt.subplots(ncols=5, sharey=True, figsize=(11,4))
plot_labels = ['a)','b)','c)','d)','e)']
# output file of histogram statistics
args = ('ATL11',METHOD,REGION,RELEASE)
FILE = '{0}_{1}_{2}_TIDAL_HISTOGRAM_{3}.txt'.format(*args)
# NOTE(review): consider a with-statement so the file is closed on error
fid = open(os.path.join(base_dir,FILE),'w')
print('Histogram Statistics ({0})'.format(METHOD), file=fid)
print('Minimum: {0:0.2f}'.format(vmin), file=fid)
print('Maximum: {0:0.2f}'.format(vmax), file=fid)
print('Width: {0:0.2f}'.format(w), file=fid)
print('Bins: {0:d}'.format(nbins), file=fid)
# print total number of points for differencing method
print('All Cycles: {0:d}\n'.format(total_valid), file=fid)
# create histogram plots
for i,key in enumerate(['Uncorrected',*TIDE_MODELS]):
    # plot histograms
    ax[i].plot(b2,hist[key],color='darkorchid',lw=1.5)
    ax[i].fill_between(b2,hist[key],color='darkorchid',alpha=0.5)
    # set title
    ax[i].set_title(key)
    # Add figure label
    at = matplotlib.offsetbox.AnchoredText(plot_labels[i], loc=2, pad=0,
        frameon=False, prop=dict(size=14,weight='bold',color='k'))
    ax[i].axes.add_artist(at)
    # add x labels
    ax[i].set_xlabel('Elevation Difference [m]',labelpad=3)
    # calculate histogram statistics
    N = np.sum(hist[key])
    # histogram mean and standard deviation
    hmean = np.average(b2, weights=hist[key])
    hvariance = np.average((b2-hmean)**2, weights=hist[key])
    hstdev = np.sqrt(hvariance)
    # histogram skewness and excess kurtosis
    hskewness = np.average((b2-hmean)**3, weights=hist[key])/(hstdev**3)
    hkurtosis = np.average((b2-hmean)**4, weights=hist[key])/(hstdev**4)
    hkurtosis_excess = hkurtosis - 3.0
    # omnibus chi-squared test of normality
    # (pvalue is currently only reported via the commented-out print below)
    mu1 = np.sqrt(6.0*N*(N-1.0)/(N-2.0)/(N+1.0)/(N+3.0))
    mu2 = 2.0*mu1*np.sqrt((N*N-1.0)/(N-3.0)/(N+5.0))
    chi2 = (hskewness/mu1)**2 + (hkurtosis_excess/mu2)**2
    pvalue = 1.0 - scipy.stats.chi2.cdf(chi2,2)
    # cumulative probability distribution function of histogram
    cpdf = np.cumsum(hist[key]/np.sum(hist[key]))
    # calculate percentiles for IQR and RDE
    # IQR: first and third quartiles (25th and 75th percentiles)
    # RDE: 16th and 84th percentiles
    # median: 50th percentile
    Q1,Q3,P16,P84,hmedian = np.interp([0.25,0.75,0.16,0.84,0.5],cpdf,b2)
    # calculate interquartile range (IQR)
    # NOTE(review): the 0.75 factor presumably rescales the IQR toward a
    # normal-equivalent standard deviation — verify against the source paper
    hIQR = 0.75*(Q3 - Q1)
    # calculate robust dispersion estimator (RDE)
    hRDE = 0.50*(P84 - P16)
    # print model to file
    print('{0}:'.format(key), file=fid)
    # print statistics to file
    print('\t{0}: {1:f}'.format('Mean',hmean), file=fid)
    print('\t{0}: {1:f}'.format('Median',hmedian), file=fid)
    print('\t{0}: {1:f}'.format('StDev',hstdev), file=fid)
    print('\t{0}: {1:f}'.format('Skewness',hskewness), file=fid)
    print('\t{0}: {1:f}'.format('Kurtosis',hkurtosis_excess), file=fid)
    # print('\t{0}: {1:f}'.format('Normal',pvalue), file=fid)
    # print median statistics to file
    print('\t{0}: {1:f}'.format('IQR',hIQR), file=fid)
    print('\t{0}: {1:f}'.format('RDE',hRDE), file=fid)
    # output file of histogram for model
    args = ('ATL11',METHOD,REGION,key,RELEASE)
    HIST = '{0}_{1}_{2}_{3}_TIDAL_HISTOGRAM_{4}.txt'.format(*args)
    fid1 = open(os.path.join(base_dir,HIST),'w')
    # for each histogram bin
    for bn,hst in zip(b2,hist[key]):
        print('{0:0.02f} {1:0.0f}'.format(bn,hst),file=fid1)
    # close model histogram file
    fid1.close()
# close statistics file
fid.close()
# add y labels
ax[0].set_ylabel('Count', labelpad=10)
# set ylimits
ymin,ymax = ax[0].get_ylim()
ax[0].set_ylim(0,ymax)
# adjust plot to figure dimensions
fig.subplots_adjust(left=0.07,right=0.98,top=0.93,bottom=0.11,
    wspace=0.12,hspace=0.20)
# output file format for each region type
args = ('ATL11',METHOD,REGION,RELEASE)
PLOT = '{0}_{1}_{2}_TIDAL_HISTOGRAM_{3}.pdf'.format(*args)
# save plot as pdf to the data directory
print('\t--> {0}'.format(os.path.join(base_dir,PLOT)))
plt.savefig(os.path.join(base_dir,PLOT), format='pdf', dpi=720,
    metadata={'Title':os.path.basename(sys.argv[0])})
# +
# figure axes for merged histogram plots
fig,ax = plt.subplots(num=2,figsize=(6,6))
# inset axes for a zoom around zero elevation difference
axins = ax.inset_axes([0.65, 0.65, 0.33, 0.33])
# plot colors for histograms
COLORS = ['0.2','darkorchid','mediumseagreen','darkorange','dodgerblue']
# create merged histogram plot
for i,key in enumerate(['Uncorrected',*TIDE_MODELS]):
    # plot histograms
    ax.plot(b2,hist[key],color=COLORS[i],lw=1.5, label=key)
    axins.plot(b2,hist[key],color=COLORS[i],lw=1.5)
# add legend
lgd = ax.legend(loc=2,frameon=False)
lgd.get_frame().set_alpha(1.0)
for line in lgd.get_lines():
    line.set_linewidth(6)
# create sub region of the original plot
# NOTE(review): inset y-range appears hard-coded for a particular dataset
x1, x2, y1, y2 = (-0.10, 0.10, 260000, 275000)
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.set_xticklabels('')
axins.set_yticklabels('')
ax.indicate_inset_zoom(axins)
# add x and y labels
ax.set_xlabel('Elevation Difference [m]',labelpad=3)
ax.set_ylabel('Count', labelpad=10)
# set ylimits
ymin,ymax = ax.get_ylim()
ax.set_ylim(0,ymax)
# adjust plot to figure dimensions
fig.subplots_adjust(left=0.14,right=0.98,top=0.98,bottom=0.08)
# output file format
args = ('ATL11',METHOD,REGION,RELEASE)
PLOT = '{0}_{1}_{2}_TIDAL_HISTOGRAM_{3}_single.pdf'.format(*args)
# save plot as pdf to the data directory
print('\t--> {0}'.format(os.path.join(base_dir,PLOT)))
plt.savefig(os.path.join(base_dir,PLOT), format='pdf', dpi=720,
    metadata={'Title':os.path.basename(sys.argv[0])})
| notebooks/Plot ATL11 Tidal Histograms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:DAND]
# language: python
# name: conda-env-DAND-py
# ---
# + [markdown] deletable=true editable=true
# # Regression Week 5: Feature Selection and LASSO (Interpretation)
# + [markdown] deletable=true editable=true
# In this notebook, you will use LASSO to select features, building on a pre-implemented solver for LASSO (using GraphLab Create, though you can use other solvers). You will:
# * Run LASSO with different L1 penalties.
# * Choose best L1 penalty using a validation set.
# * Choose best L1 penalty using a validation set, with additional constraint on the size of subset.
#
# In the second notebook, you will implement your own LASSO solver, using coordinate descent.
# + [markdown] deletable=true editable=true
# # Fire up Graphlab Create
# + deletable=true editable=true
import graphlab
# + [markdown] deletable=true editable=true
# # Load in house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# + deletable=true editable=true
# load King County house sales data from a GraphLab Create SFrame directory
sales = graphlab.SFrame('kc_house_data.gl/')
# + [markdown] deletable=true editable=true
# # Create new features
# + [markdown] deletable=true editable=true
# As in Week 2, we consider features that are some transformations of inputs.
# + deletable=true editable=true
from math import log, sqrt
# sqrt transforms compress the spread of the size features: doubling the
# size less than doubles the feature value
sales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)
sales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)
# squaring accentuates differences for houses with many bedrooms
sales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']
# In the dataset, 'floors' was defined with type string,
# so we'll convert them to float, before creating a new feature.
sales['floors'] = sales['floors'].astype(float)
sales['floors_square'] = sales['floors']*sales['floors']
# + [markdown] deletable=true editable=true
# * Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.
# * On the other hand, taking square root of sqft_living will decrease the separation between big house and small house. The owner may not be exactly twice as happy for getting a house that is twice as big.
# + [markdown] deletable=true editable=true
# # Learn regression weights with L1 penalty
# + [markdown] deletable=true editable=true
# Let us fit a model with all the features available, plus the features we just created above.
# + deletable=true editable=true
# full candidate feature set: raw inputs plus the transformed features above
all_features = ['bedrooms', 'bedrooms_square',
            'bathrooms',
            'sqft_living', 'sqft_living_sqrt',
            'sqft_lot', 'sqft_lot_sqrt',
            'floors', 'floors_square',
            'waterfront', 'view', 'condition', 'grade',
            'sqft_above',
            'sqft_basement',
            'yr_built', 'yr_renovated']
# + [markdown] deletable=true editable=true
# Applying L1 penalty requires adding an extra parameter (`l1_penalty`) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set `l2_penalty=0` to ensure we don't introduce an additional L2 penalty.
# + deletable=true editable=true
# LASSO fit on the full data: a large l1_penalty drives most weights to zero;
# l2_penalty must be 0 so no ridge term sneaks in
model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,
                                              validation_set=None,
                                              l2_penalty=0., l1_penalty=1e10)
# + [markdown] deletable=true editable=true
# Find what features had non-zero weight.
# + deletable=true editable=true
# print every coefficient row so the zeroed-out features are visible
rows = len(all_features)
model_all.get('coefficients').print_rows(num_rows=rows)
# + [markdown] deletable=true editable=true
# Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
#
# ***QUIZ QUESTION***:
# According to this list of weights, which of the features have been chosen?
# + [markdown] deletable=true editable=true
# # Selecting an L1 penalty
# + [markdown] deletable=true editable=true
# To find a good L1 penalty, we will explore multiple values using a validation set. Let us do three way split into train, validation, and test sets:
# * Split our sales data into 2 sets: training and test
# * Further split our training data into two sets: train, validation
#
# Be *very* careful that you use seed = 1 to ensure you get the same answer!
# + deletable=true editable=true
# fixed seed so the splits (and hence the quiz answers) are reproducible
(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split
(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate
# + [markdown] deletable=true editable=true
# Next, we write a loop that does the following:
# * For `l1_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, type `np.logspace(1, 7, num=13)`.)
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list.
# * Compute the RSS on VALIDATION data (here you will want to use `.predict()`) for that `l1_penalty`
# * Report which `l1_penalty` produced the lowest RSS on validation data.
#
# When you call `linear_regression.create()` make sure you set `validation_set = None`.
#
# Note: you can turn off the print out of `linear_regression.create()` with `verbose = False`
# + deletable=true editable=true
import numpy as np
# 13 candidate penalties, log-spaced from 10^1 to 10^7
np_logspace = np.logspace(1,7,num=13)
num_penality = len(np_logspace)
RSS = np.empty(num_penality)
for i in xrange(num_penality):
    # fit on TRAIN only; verbose=False silences the solver's progress output
    model = graphlab.linear_regression.create(training,target='price',features=all_features,l1_penalty=np_logspace[i],l2_penalty=0,validation_set=None,verbose=False)
    # residual sum of squares on the held-out VALIDATION set
    prediction = model.predict(validation)
    errors = prediction - validation['price']
    RSS[i] = np.dot(errors,errors)
print RSS
# + deletable=true editable=true
# report the penalty achieving the smallest validation RSS
# (np.argmin would do the index lookup in one step)
rss_min = RSS.min()
#print rss_min
for i in xrange(num_penality):
    #print RSS[i]
    #print np_logspace[i]
    if RSS[i]==rss_min:
        print np_logspace[i]
# + [markdown] deletable=true editable=true
# *** QUIZ QUESTION. *** What was the best value for the `l1_penalty`?
# + deletable=true editable=true
# refit with the winning penalty (10) and count the non-zero weights
model_l1 = graphlab.linear_regression.create(training,target='price',features=all_features,l1_penalty=10,l2_penalty=0,validation_set=None,verbose=False)
model_l1.get('coefficients').print_rows(num_rows=rows)
model_l1.coefficients['value'].nnz()
# + [markdown] deletable=true editable=true
# ***QUIZ QUESTION***
# Also, using this value of L1 penalty, how many nonzero weights do you have?
# + [markdown] deletable=true editable=true
# # Limit the number of nonzero weights
#
# What if we absolutely wanted to limit ourselves to, say, 7 features? This may be important if we want to derive "a rule of thumb" --- an interpretable model that has only a few features in them.
# + [markdown] deletable=true editable=true
# In this section, you are going to implement a simple, two-phase procedure to achieve this goal:
# 1. Explore a large range of `l1_penalty` values to find a narrow region of `l1_penalty` values where models are likely to have the desired number of non-zero weights.
# 2. Further explore the narrow region you found to find a good value for `l1_penalty` that achieves the desired sparsity. Here, we will again use a validation set to choose the best value for `l1_penalty`.
# + deletable=true editable=true
# target sparsity: we want a model with exactly 7 non-zero weights
max_nonzeros = 7
# + [markdown] deletable=true editable=true
# ## Exploring the larger range of values to find a narrow range with the desired sparsity
#
# Let's define a wide range of possible `l1_penalty_values`:
# + deletable=true editable=true
# wide sweep: 20 penalties log-spaced from 10^8 to 10^10
l1_penalty_values = np.logspace(8, 10, num=20)
# + [markdown] deletable=true editable=true
# Now, implement a loop that search through this space of possible `l1_penalty` values:
#
# * For `l1_penalty` in `np.logspace(8, 10, num=20)`:
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. When you call `linear_regression.create()` make sure you set `validation_set = None`
# * Extract the weights of the model and count the number of nonzeros. Save the number of nonzeros to a list.
# * *Hint: `model['coefficients']['value']` gives you an SArray with the parameters you learned. If you call the method `.nnz()` on it, you will find the number of non-zero parameters!*
# + deletable=true editable=true
# sweep the wide penalty range, recording the validation RSS and the number
# of non-zero coefficients for each fit
np_logspace = np.logspace(8, 10, num=20)
num_penality = len(np_logspace)
RSS = np.empty(num_penality)
for i in xrange(num_penality):
    model = graphlab.linear_regression.create(training,target='price',features=all_features,l1_penalty=np_logspace[i],l2_penalty=0,validation_set=None,verbose=False)
    prediction = model.predict(validation)
    errors = prediction - validation['price']
    RSS[i] = np.dot(errors,errors)
    # .nnz() counts the non-zero learned parameters
    nnz = model['coefficients']['value'].nnz()
    print str(i) + ': '+ str(nnz)
print RSS
# + deletable=true editable=true
print np_logspace
# + [markdown] deletable=true editable=true
# Out of this large range, we want to find the two ends of our desired narrow range of `l1_penalty`. At one end, we will have `l1_penalty` values that have too few non-zeros, and at the other end, we will have an `l1_penalty` that has too many non-zeros.
#
# More formally, find:
# * The largest `l1_penalty` that has more non-zeros than `max_nonzeros` (if we pick a penalty smaller than this value, we will definitely have too many non-zero weights)
# * Store this value in the variable `l1_penalty_min` (we will use it later)
# * The smallest `l1_penalty` that has fewer non-zeros than `max_nonzeros` (if we pick a penalty larger than this value, we will definitely have too few non-zero weights)
# * Store this value in the variable `l1_penalty_max` (we will use it later)
#
#
# *Hint: there are many ways to do this, e.g.:*
# * Programmatically within the loop above
# * Creating a list with the number of non-zeros for each value of `l1_penalty` and inspecting it to find the appropriate boundaries.
# + deletable=true editable=true
# indices 14/15 were read off the printed nnz counts above: index 14 is the
# largest penalty with more than max_nonzeros non-zeros, index 15 the smallest
# with fewer -- NOTE(review): hand-picked; re-check if the data or seed changes
l1_penalty_min = np_logspace[14]
l1_penalty_max = np_logspace[15]
print l1_penalty_min
print l1_penalty_max
# + [markdown] deletable=true editable=true
# ***QUIZ QUESTION.*** What values did you find for `l1_penalty_min` and `l1_penalty_max`, respectively?
# + [markdown] deletable=true editable=true
# ## Exploring the narrow range of values to find the solution with the right number of non-zeros that has lowest RSS on the validation set
#
# We will now explore the narrow region of `l1_penalty` values we found:
# + deletable=true editable=true
# 20 evenly spaced penalties inside the narrow bracket
l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)
# + [markdown] deletable=true editable=true
# * For `l1_penalty` in `np.linspace(l1_penalty_min,l1_penalty_max,20)`:
# * Fit a regression model with a given `l1_penalty` on TRAIN data. Specify `l1_penalty=l1_penalty` and `l2_penalty=0.` in the parameter list. When you call `linear_regression.create()` make sure you set `validation_set = None`
# * Measure the RSS of the learned model on the VALIDATION set
#
# Find the model that the lowest RSS on the VALIDATION set and has sparsity *equal* to `max_nonzeros`.
# + deletable=true editable=true
# fine sweep over the narrow range: record RSS and sparsity for each penalty
num_l1 = len(l1_penalty_values)
RSS = np.empty(num_l1)
for i in xrange(num_l1):
    model = graphlab.linear_regression.create(training,target='price',features=all_features,l1_penalty=l1_penalty_values[i],l2_penalty=0,validation_set=None,verbose=False)
    prediction = model.predict(validation)
    errors = prediction - validation['price']
    RSS[i] = np.dot(errors,errors)
    # number of non-zero weights for this penalty
    nnz = model['coefficients']['value'].nnz()
    print str(i) + ': '+ str(nnz)
print RSS
# + [markdown] deletable=true editable=true
# ***QUIZ QUESTIONS***
# 1. What value of `l1_penalty` in our narrow range has the lowest RSS on the VALIDATION set and has sparsity *equal* to `max_nonzeros`?
# 2. What features in this model have non-zero coefficients?
# + deletable=true editable=true
# overall minimum-RSS penalty (may not satisfy the sparsity constraint)
rss_min = RSS.min()
print rss_min
for i in xrange(num_l1):
    #print RSS[i]
    #print np_logspace[i]
    if RSS[i]==rss_min:
        print i
        print l1_penalty_values[i]
# + deletable=true editable=true
# restrict to indices 11-14, the fits whose sparsity equals max_nonzeros
# (read off the printed nnz counts above) -- TODO confirm if the data changes
rss_min2 = RSS[11:15].min()
print rss_min2
# + deletable=true editable=true
print RSS[11]
# + deletable=true editable=true
print l1_penalty_values[11]
# + deletable=true editable=true
# refit with the chosen penalty and show which features survived
model = graphlab.linear_regression.create(training,target='price',features=all_features,l1_penalty=l1_penalty_values[11],l2_penalty=0,validation_set=None,verbose=False)
model.get('coefficients').print_rows(num_rows=rows)
# + deletable=true editable=true
| Programming Assignment 7/week-5-lasso-assignment-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Using formulas to specify models
#
# All of the models can be specified using formulas. The formulas used here utilize [formulaic](https://matthewwardrop.github.io/formulaic/) are similar to those in [statsmodels](http://www.statsmodels.org). The basis formula syntax for a single variable regression would be
#
# ```
# y ~ 1 + x
# ```
#
# The formulas used with ``BetweenOLS``, ``PooledOLS`` and ``RandomEffects`` are completely standard and are identical to [statsmodels](http://www.statsmodels.org). ``FirstDifferenceOLS`` is nearly identical with the caveat that the model *cannot* include an intercept.
#
# ``PanelOLS``, which implements effects (entity, time or other) has a small extension to the formula which allows entity effects or time effects (or both) to be specified as part of the formula. While it is not possible to specify other effects using the formula interface, these can be included as an optional parameter when using a formula.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Loading and preparing data
# When using formulas, a MultiIndex pandas dataframe where the index is entity-time is **required**. Here the Grunfeld data, from "The Determinants of Corporate Investment", provided by [statsmodels](http://www.statsmodels.org/stable/datasets/generated/grunfeld.html), is used to illustrate the use of formulas. This dataset contains data on firm investment, market value and the stock of plant capital.
#
# ``set_index`` is used to set the index using variables from the dataset.
# + pycharm={"name": "#%%\n"}
from statsmodels.datasets import grunfeld
# Grunfeld corporate-investment data; the formula interface requires a
# MultiIndex (entity, time) frame, built here with set_index
data = grunfeld.load_pandas().data
data = data.set_index(["firm", "year"])
print(data.head())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## PanelOLS with Entity Effects
#
# Entity effects are specified using the special command `EntityEffects`. By default a constant is not included, and so if a constant is desired, `1+` should be included in the formula. When including effects, the model and fit are identical whether a constant is included or not.
# + pycharm={"name": "#%%\n"}
from linearmodels import PanelOLS
# EntityEffects in the formula adds firm fixed effects (no constant by default)
mod = PanelOLS.from_formula("invest ~ value + capital + EntityEffects", data=data)
print(mod.fit())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## PanelOLS with Entity Effects and a constant
#
# The constant can be explicitly included using the `1 + ` notation. When a constant is included in the model, and additional constraint is imposed that the number of the effects is 0. This allows the constant to be identified using the grand mean of the dependent and the regressors.
# + pycharm={"name": "#%%\n"}
# explicit constant via `1 +`; identified by constraining the effects to sum to 0
mod = PanelOLS.from_formula("invest ~ 1 + value + capital + EntityEffects", data=data)
print(mod.fit())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Panel with Entity and Time Effects
#
# Time effects can be similarly included using `TimeEffect`. In many models, time effects can be consistently estimated and so they could be equivalently included in the set of regressors using a categorical variable.
# + pycharm={"name": "#%%\n"}
# two-way fixed effects: firm and year
mod = PanelOLS.from_formula(
    "invest ~ 1 + value + capital + EntityEffects + TimeEffects", data=data
)
print(mod.fit())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Between OLS
#
# The other panel models are straight-forward and are included for completeness.
# + pycharm={"name": "#%%\n"}
from linearmodels import BetweenOLS, FirstDifferenceOLS, PooledOLS
# between estimator: regression on the entity (time-averaged) means
mod = BetweenOLS.from_formula("invest ~ 1 + value + capital", data=data)
print(mod.fit())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## First Difference OLS
#
# The first difference model must never include a constant since this is not identified after differencing.
# + pycharm={"name": "#%%\n"}
# note: no `1 +` -- a constant is not identified after first-differencing
mod = FirstDifferenceOLS.from_formula("invest ~ value + capital", data=data)
print(mod.fit())
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Pooled OLS
#
# The pooled OLS estimator is a special case of `PanelOLS` when there are no effects. It is effectively identical to `OLS` in `statsmodels` (or `WLS`) but is included for completeness.
# + pycharm={"name": "#%%\n"}
# pooled OLS: PanelOLS with no effects; equivalent to plain OLS
mod = PooledOLS.from_formula("invest ~ 1 + value + capital", data=data)
print(mod.fit())
| examples/panel_using-formulas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# input word list (one concept per row)
rawdatacsv="raw-word-data.csv"
# shuffled output files in the various formats produced below
shuffledcsv="shuffled_out_new.csv"
shuffledxls="shuffled_out_new.xls"
shuffledxls_pair_merge="shuffled_out_new_pair_merge.xls" # <---- new data set to use
# +
#instead of going through all n words and then shuffling - which results in n cards.
# Rather treat the words as a bag of words. Once word is removed it is removed
import csv
import random
import time
import pandas
import numpy as np
# unique concepts, shuffled in place to randomise the starting order
df = pandas.read_csv(rawdatacsv)
uniq = df.Concepts.unique()
np.random.shuffle(uniq)
# -
# if uniq is not divisible by the card size (4), pad with random repeats
def random_unique(data, duplicates=None):
    """Return a random element from ``data`` that is not in ``duplicates``.

    Parameters
    ----------
    data : np.array or list
        pool of candidate words to draw from
    duplicates : list, optional
        values the returned choice must not match

    Returns
    -------
    choice
        a single element drawn from ``data``

    Note: if ``duplicates`` covers every element of ``data`` this loops
    forever -- callers must keep the pool larger than the exclusion list.
    """
    # retry until we draw something outside the exclusion list
    # (iterative rather than recursive, so long retry runs cannot
    # exhaust the call stack)
    while True:
        choice = random.choice(data)
        if not duplicates or choice not in duplicates:
            return choice


def pad_data(data, card_size):
    """Pad ``data`` so its length is a multiple of ``card_size``.

    e.g. 10 words and 4 words per card means 2 words of padding and
    12 words returned.

    Parameters
    ----------
    data : np.array or list
        the full word list
    card_size : int
        how many words per card

    Returns
    -------
    padded_data
        original data plus enough random filler words; the filler is drawn
        from the words NOT already on the final (partial) card, and contains
        no repeats among itself
    """
    mod = len(data) % card_size
    if mod == 0:
        # already a whole number of cards -- nothing to pad
        return data
    # the last `mod` elements form the partial card, so exclude exactly those
    # from the filler pool (the original slice dropped one element too many:
    # len(data)-mod-1 instead of len(data)-mod)
    dataslice = data[0:len(data)-mod]
    new_data = []
    for i in range(0, card_size-mod):
        # keep the filler itself free of duplicates
        new_data.append(random_unique(dataslice, new_data))
    padded_data = np.append(data, new_data)
    return padded_data
# shuffle, pad, repeat up to max_loop times, drop duplicate rows and return
def generate_cards(data,card_size,max_loop, column_names=['word1', 'word2', 'word3', 'word4']):
    """Generate a table of shuffled word cards for an MS Word mail merge.

    Parameters
    ----------
    data : np.array or list
        the word list to pull from (shuffled in place between passes)
    card_size : int
        how many words per card (= columns in the output)
    max_loop : int
        how many shuffled passes over the data to concatenate
    column_names : list
        the names of columns - these are column headings for the dataframe
        and needed later for mailmerge

    Returns
    -------
    dataframe
        one row per card with card_size shuffled word columns; duplicate
        rows removed
    """
    # (the original also shuffled and padded once before the loop, but that
    # result was unconditionally overwritten by the concatenate below)
    list_of_datasets = []
    for i in range(0,max_loop):
        random.seed(random.randint(0,1000000000)) # get a new random seed for each shuffle
        np.random.shuffle(data) # shuffle data each time to create a new view of the dataset
        list_of_datasets.append(pad_data(data,card_size))
    data_set = np.concatenate(list_of_datasets)
    # reshape the flat word stream into rows of card_size words
    reshapedata=data_set.reshape((int(len(data_set)/card_size)), card_size)
    df = pandas.DataFrame(reshapedata, columns=column_names)
    return df.drop_duplicates()
# build the 4-word card table and export the first 100 cards to Excel
fdf = generate_cards(uniq,4,4)
fdf = fdf.rename_axis('index')
fdf[0:100].to_excel(shuffledxls)
fdf
# +
# generate as if 2 words per card and then combine, ensures words are found in both the discussion and drawing parts of the card
one_df = generate_cards(uniq,2,2,column_names=['word1', 'word2'])
two_df = generate_cards(uniq,2,2,column_names=['word3', 'word4'])
# join on the row index so each card gets two discussion + two drawing words
merged_df = one_df.merge(two_df, left_index=True, right_index=True)
merged_df = merged_df.rename_axis('index')
merged_df[0:100].to_excel(shuffledxls_pair_merge)
#len(pad_data(uniq,4))
# -
print(merged_df[0:100])
# +
# noting a poor performance issue - where some words are repeated too many times. Actually most words are seen 4 times. Some are seen only 3 and some seen 5 times.
# can create better stats but this is OK for now.
# the main issue is words that appear 4 times in the same spot and only in drawing or only discussing.
# using a different random seed seems to help but still the issue.
# consider running this as if on 2 cards and then combine.
# -
# check stats when using a 2 card merge: per-word count in each column
# position plus the total appearances across the first 100 cards
firsthundred = merged_df[0:100]
for key in uniq:
    w1=list(firsthundred.word1).count(key)
    w2=list(firsthundred.word2).count(key)
    w3=list(firsthundred.word3).count(key)
    w4=list(firsthundred.word4).count(key)
    print(key,w1,w2,w3,w4,w1+w2+w3+w4)
# +
#check stats when using 4 words per card (same per-column count report)
firsthundred = fdf[0:100]
for key in uniq:
    w1=list(firsthundred.word1).count(key)
    w2=list(firsthundred.word2).count(key)
    w3=list(firsthundred.word3).count(key)
    w4=list(firsthundred.word4).count(key)
    print(key,w1,w2,w3,w4,w1+w2+w3+w4)
| cards/random_to_csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Notion - Update page
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Notion/Notion_Update_page.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
# + [markdown] papermill={} tags=[]
# **Tags:** #notion #productivity #naas_drivers
# + [markdown] papermill={} tags=[]
# **Author:** [<NAME>](https://www.linkedin.com/in/maximejublou)
# + [markdown] papermill={} tags=[]
# This notebook shows how to use Naas Notion driver to update a page (properties + content) inside a database.
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Import libraries
# + papermill={} tags=[]
from naas_drivers import notion
# + [markdown] papermill={} tags=[]
# ### Input variables
# <a href='https://docs.naas.ai/drivers/notion'>How to get your Notion integration token ?</a>
# + papermill={} tags=[]
# Enter Token API (Notion internal integration token)
token = "*****"
# Enter page URL (the URL below is a page, not a database)
page_url = "https://www.notion.so/naas-official/Daily-med-03952fcb93c045bba519a7564a64045e"
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] papermill={} tags=[]
# ### Get page
# Get your page content and returns a dataframe with name of column, type and value.
# + papermill={} tags=[]
# fetch the page content as a dataframe of (column name, type, value)
page = notion.connect(token).page.get(page_url)
page
# + [markdown] papermill={} tags=[]
# ### Update page properties
# Properties are associated with the database. If you put a page type that is not currently present, it will create it.
# + papermill={} tags=[]
# one setter per Notion property type; nothing is sent until update()
page.title("Name","Page updated")
page.rich_text("Text","Ceci est toto")
page.number("Number", 42)
page.select("Select","Value3")
page.multi_select("Muti Select",["Value1","Value2","Value3"])
page.date("Date","2021-10-03T17:01:26") #Follow ISO 8601 format
page.people("People", ["6e3bab71-beeb-484b-af99-ea30fdef4773"]) #list of ID of users
page.checkbox("Checkbox", False)
page.email("Email","<EMAIL>")
page.phone_number("Phone number","+33 6 21 83 11 12")
# push the property changes to Notion
page.update()
# + [markdown] papermill={} tags=[]
# ### Update page blocks
# Blocks are the content of the page.
# + papermill={} tags=[]
# append content blocks to the page body, then push with update()
page.heading_1("Heading 1")
page.heading_2("Heading 2")
page.heading_3("Heading 3")
page.paragraph("Paragraph")
page.numbered_list_item("This is first")
page.to_do("Need this to be done")
page.embed("https://docs.google.com/spreadsheets/*************")
page.video("https://www.youtube.com/watch?v=8AsMAc4VFJs")
page.image("https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png")
page.code("pip install naas")
page.equation("e=mc2")
page.update()
# + [markdown] papermill={} tags=[]
# ## Output
# + papermill={} tags=[]
page
| Notion/Notion_Update_page.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 연립방정식과 역행렬
# 다음과 같이 $x_1, x_2, \cdots, x_n$ 이라는 $n$ 개의 미지수를 가지는 방정식을 연립 방정식(system of equations)이라고 한다.
#
# $$
# \begin{matrix}
# a_{11} x_1 & + \;& a_{12} x_2 &\; + \cdots + \;& a_{1M} x_M &\; = \;& b_1 \\
# a_{21} x_1 & + \;& a_{22} x_2 &\; + \cdots + \;& a_{2M} x_M &\; = \;& b_2 \\
# \vdots\;\;\; & & \vdots\;\;\; & & \vdots\;\;\; & & \;\vdots \\
# a_{N1} x_1 & + \;& a_{N2} x_2 &\; + \cdots + \;& a_{NM} x_M &\; = \;& b_N \\
# \end{matrix}
# $$
#
# 행렬의 곱셈을 이용하면 이 연립 방정식은 다음과 같이 간단하게 쓸 수 있다.
# $$ Ax = b $$
# 이 식에서 $A, x, b$ 는 다음과 같이 정의한다.
#
# $$
# A =
# \begin{bmatrix}
# a_{11} & a_{12} & \cdots & a_{1M} \\
# a_{21} & a_{22} & \cdots & a_{2M} \\
# \vdots & \vdots & \ddots & \vdots \\
# a_{N1} & a_{N2} & \cdots & a_{NM} \\
# \end{bmatrix}
# $$
#
#
# $$
# x =
# \begin{bmatrix}
# x_1 \\ x_2 \\ \vdots \\ x_M
# \end{bmatrix}
# $$
#
#
# $$
# b=
# \begin{bmatrix}
# b_1 \\ b_2 \\ \vdots \\ b_N
# \end{bmatrix}
# $$
#
# $$
# Ax = b
# \;\;\;\;\;
# \rightarrow
# \;\;\;\;\;
# \begin{bmatrix}
# a_{11} & a_{12} & \cdots & a_{1M} \\
# a_{21} & a_{22} & \cdots & a_{2M} \\
# \vdots & \vdots & \ddots & \vdots \\
# a_{N1} & a_{N2} & \cdots & a_{NM} \\
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\ x_2 \\ \vdots \\ x_M
# \end{bmatrix}
# =
# \begin{bmatrix}
# b_1 \\ b_2 \\ \vdots \\ b_N
# \end{bmatrix}
# $$
#
# 만약 $A, x, b$가 행렬이 아닌 실수라면 이 식은 나눗셈을 사용하여 다음과 같이 쉽게 풀 수 있을 것이다.
#
# $$ x = \dfrac{b}{A} $$
#
# 그러나 행렬은 나눗셈이 정의되지 않으므로 이 식은 사용할 수 없다. 대신 역행렬(inverse)을 사용하여 이 식을 쉽게 풀 수 있다.
# ## 역행렬
# 정방 행렬(square matrix) $A\;(A \in \mathbb{R}^{M \times M}) $ 에 대한 역행렬은 $A^{-1}$ 이란 기호로 표시한다.
#
# 역행렬 $A^{-1}$은 원래의 행렬 $A$와 다음 관계를 만족하는 정방 행렬을 말한다. $I$는 단위 행렬(identity matrix)이다.
#
# $$ A^{-1} A = A A^{-1} = I $$
# 두 개 이상의 정방 행렬의 곱은 마찬가지로 같은 크기의 정방행렬이 되는데 이러한 행렬의 곱의 역행렬에 대해서는 다음 성질이 성립한다.
#
# $$ (AB)^{-1} = B^{-1} A^{-1} $$
#
# $$ (ABC)^{-1} = C^{-1} B^{-1} A^{-1} $$
# ## 역행렬과 연립 방정식의 해
# 미지수의 수와 방정식의 수가 같다면 행렬 $A$ 는 정방 행렬이 된다.
#
#
# 만약 행렬 $A$의 역행렬 $ A^{-1} $ 이 존재한다면 역행렬의 정의에서 연립 방정식의 해는 다음과 같이 구해진다.
#
# $$ Ax = b $$
#
#
# $$ A^{-1}Ax = A^{-1}b $$
#
#
# $$ Ix = A^{-1}b $$
#
#
# $$ x = A^{-1}b $$
#
# ## NumPy의 역행렬 계산
# NumPy의 linalg 서브패키지에는 역행렬을 구하는 `inv` 라는 명령어가 존재한다. 그러나 실제 계산시에는 수치해석 상의 여러가지 문제로 `inv` 명령어 보다는 `lstsq` (least square) 명령어를 사용한다.
# numpy was never imported anywhere in this notebook, so running it
# top-to-bottom raised NameError on `np`; import it here
import numpy as np

A = np.array([[1, 3, -2], [3, 5, 6], [2, 4, 3]])
A
b = np.array([[5], [7], [8]])
b
# invert A, then solve the linear system as x = A^-1 b
Ainv = np.linalg.inv(A)
Ainv
x = np.dot(Ainv, b) # must be a matrix product -- plain elementwise multiply would broadcast to a 3x3 result
x
np.dot(A, x) - b # verify the answer; floating-point error keeps this from being exactly zero
x, resid, rank, s = np.linalg.lstsq(A, b) # least-squares solve, numerically preferred over inv
x
# 위 해결 방법에는 두 가지 의문이 존재한다. 우선 역행렬이 존재하는지 어떻게 알 수 있는가? 또 두 번째 만약 미지수의 수와 방정식의 수가 다르다면 어떻게 되는가?
# ## 행렬식
# 우선 역행렬이 존재하는지 알아보는 방법의 하나로 행렬식(determinant)라는 정방 행렬의 특징을 계산하는 방법이다. 행렬 $A$ 에 대한 행렬식은 $\text{det}A$라는 기호로 표기한다.
#
#
# 행렬식(determinant)의 수학적인 정의는 상당히 복잡하므로 여기에서는 생략한다. 다만 몇가지 크기의 정방 행렬에 대해서는 다음과 같은 수식으로 구할 수 있다.
#
#
#
# * 1×1 행렬의 행렬식
#
# $$\det\begin{bmatrix}a\end{bmatrix}=a$$
#
# * 2×2 행렬의 행렬식
# $$\det\begin{bmatrix}a&b\\c&d\end{bmatrix}=ad-bc$$
#
# * 3×3 행렬의 행렬식
# $$\det\begin{bmatrix}a&b&c\\d&e&f\\g&h&i\end{bmatrix}=aei+bfg+cdh-ceg-bdi-afh$$
#
# NumPy에서는 `det` 명령으로 행렬식의 값을 구할 수 있다.
# fixed seed so the example matrix (and its determinant) are reproducible
np.random.seed(0)
A = np.random.randn(3, 3)
A
# a nonzero determinant means A is invertible
np.linalg.det(A)
# 행렬식과 역행렬 사이에는 다음의 관계가 있다.
# ** 행렬식의 값이 0이 아니면 역행렬이 존재한다. 반대로 역행렬이 존재하면 행렬식의 값은 0이 아니다. **
# ## 최소 자승 문제
# 연립 방정식은 다음과 같은 세 종류가 있다.
#
# 1. 미지수의 수가 방정식의 수와 같다. ($N = M$)
# 2. 미지수의 수가 방정식의 수보다 적다. ($N < M$)
# 3. 미지수의 수가 방정식의 수보다 많다. ($N > M$)
# 1번의 경우는 앞에서 다루었다. 2번의 경우에는 너무 많은 해가 존재할 수 있다. 3번의 경우에는 2번과 반대로 모든 조건을 만족하는 해가 하나도 존재할 수 없을 수도 있다.
# 그런데 데이터 분석 문제에서는 $A$ 를 feature matrix, $x$ 를 가중치 벡터 $w$ 라고 보았을 때 데이터의 수가 가중치의 갯수보다 많은 경우가 일반적이다. 다만 이 때는 방정식이 정확하게 등호를 이루기를 바라지는 않는다. 즉, 대략적으로만 좌변과 우변이 비슷하면 되는 경우이다.
#
# $$ Ax \approx b $$
# 이 경우에는 좌변과 우변의 차이를 최소화하는 문제로 바꾸어 풀 수 있다. (오차 측정)
#
# $$ e = Ax-b $$
#
# $$ e^Te = (Ax-b)^T(Ax-b) $$
#
#
# $$ x^* = \text{arg} \min_x e^Te = \text{arg} \min_x \; (Ax-b)^T(Ax-b) $$
# 이러한 문제를 **최소 자승(Least Square)** 문제라고 한다. 그러나 정방행렬이 아님
# 최소 자승 문제의 답은 $A^TA$ 는 항상 정방행렬이 된다는 점을 사용하여 다음과 같이 풀 수 있다.
#
#
# $$ Ax = b $$
#
# $$ A^TAx = A^Tb $$
#
#
# $$ (A^TA)x = A^Tb $$
#
# $$ x = (A^TA)^{-1}A^T b $$
#
# $$ x = ((A^TA)^{-1}A^T) b $$
# 이 값이 최소 자승 문제의 답이 된다는 것은 행렬의 미분을 사용하여 증명할 수 있다. 여기에서 행렬 $(A^TA)^{-1}A^T$ 를 행렬 $A$ 의 의사 역행렬(pseudo inverse)라고 하며 다음과 같이 $ A^{+}$ 로 표기하기도 한다.
#
# $$ A^{+} = (A^TA)^{-1}A^T $$
# NumPy의 `lstsq` 명령은 사실 이러한 최소 자승 문제를 푸는 명령이다.
A = np.array([[2, 0], [-1, 1], [0, 2]])
A
b = np.array([[1], [0], [-1]])
b
# pseudo-inverse built by hand: (A^T A)^-1 A^T
Apinv = np.dot(np.linalg.inv(np.dot(A.T, A)), A.T) # is there a pinv-style command for this?
Apinv
x = np.dot(Apinv, b)
x
# residual of the least-squares solution (not exactly zero: A is not square)
np.dot(A, x) - b
x, resid, rank, s = np.linalg.lstsq(A, b) # also works for non-square matrices; resid = error, rank = matrix rank
x
# minimize인지 확인하는 방법은 행렬의 미적을 실행해봐야함
| 06. 기초 선형대수/06. 연립방정식과 역행렬.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"}
# # Convolutional Neural Network
# + [markdown] deletable=true editable=true slideshow={"slide_type": "skip"}
# ### References:
#
# Some of the images and the content I used came from this great couple of blog posts \[1\] [https://adeshpande3.github.io/adeshpande3.github.io/]() and \[2\] the terrific book, ["Neural Networks and Deep Learning"](http://neuralnetworksanddeeplearning.com/) by <NAME>. (**Strongly recommend**)
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# A convolutional neural network (CNN, or ConvNet) is a type of **feed-forward** artificial neural network in which the connectivity pattern between its neurons is inspired by the organization of the animal visual cortex.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# The networks consist of multiple layers of small neuron collections which process portions of the input image, called **receptive fields**.
#
# The outputs of these collections are then tiled so that their input regions overlap, to obtain a _better representation_ of the original image; this is repeated for every such layer.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## What does it look like?
# + [markdown] deletable=true editable=true slideshow={"slide_type": "-"}
# <img src="imgs/convnets_cover.png" width="70%" />
#
# > source: https://flickrcode.files.wordpress.com/2014/10/conv-net2.png
# + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"}
# # The Problem Space
#
# ## Image Classification
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Image classification is the task of taking an input image and outputting a class (a cat, dog, etc) or a probability of classes that best describes the image.
#
# For humans, this task of recognition is one of the first skills we learn from the moment we are born and is one that comes naturally and effortlessly as adults.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# These skills of being able to quickly recognize patterns, *generalize* from prior knowledge, and adapt to different image environments are ones that we do not share with machines.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Inputs and Outputs
# + [markdown] deletable=true editable=true
# <img src="imgs/cnn1.png" width="70%" />
#
# source: [http://www.pawbuzz.com/wp-content/uploads/sites/551/2014/11/corgi-puppies-21.jpg]()
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# When a computer sees an image (takes an image as input), it will see an array of pixel values.
#
# Depending on the resolution and size of the image, it will see a 32 x 32 x 3 array of numbers (The 3 refers to RGB values).
#
# let's say we have a color image in JPG form and its size is 480 x 480. The representative array will be 480 x 480 x 3. Each of these numbers is given a value from 0 to 255 which describes the pixel intensity at that point.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Goal
# + [markdown] deletable=true editable=true
# What we want the computer to do is to be able to differentiate between all the images it’s given and figure out the unique features that make a dog a dog or that make a cat a cat.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# When we look at a picture of a dog, we can classify it as such if the picture has identifiable features such as paws or 4 legs.
#
# In a similar way, the computer should be able to perform image classification by looking for *low level* features such as edges and curves, and then building up to more abstract concepts through a series of **convolutional layers**.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Structure of a CNN
# + [markdown] deletable=true editable=true
# > A more detailed overview of what CNNs do would be that you take the image, pass it through a series of convolutional, nonlinear, pooling (downsampling), and fully connected layers, and get an output. As we said earlier, the output can be a single class or a probability of classes that best describes the image.
#
# source: [1]
# + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"}
# # Convolutional Layer
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# The first layer in a CNN is always a **Convolutional Layer**.
# + [markdown] deletable=true editable=true
# <img src="imgs/conv.png" width="50%">
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ### Convolutional filters
#
#
# + [markdown] deletable=true editable=true
# A Convolutional Filter much like a **kernel** in image recognition is a small matrix useful for blurring, sharpening, embossing, edge detection, and more.
#
# This is accomplished by means of convolution between a kernel and an image.
#
# #### The main difference _here_ is that the conv matrices are **learned**.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# As the filter is sliding, or **convolving**, around the input image, it is multiplying the values in the filter with the original pixel values of the image <br>
# (a.k.a. computing **element wise multiplications**).
# + [markdown] deletable=true editable=true
# <img src="imgs/cnn2.png" width="80%">
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Now, we repeat this process for every location on the input volume. (Next step would be moving the filter to the right by 1 unit, then right again by 1, and so on).
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# After sliding the filter over all the locations, we are left with an array of numbers usually called an **activation map** or **feature map**.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## High Level Perspective
#
# Let’s talk about briefly what this convolution is actually doing from a high level.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Each of these filters can be thought of as **feature identifiers** (e.g. *straight edges, simple colors, curves*)
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# <img src="imgs/cnn3.png" width="70%" />
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ### Visualisation of the Receptive Field
# + [markdown] deletable=true editable=true
# <img src="imgs/cnn4.png" width="80%" />
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# <img src="imgs/cnn5.png" width="80%" />
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# <img src="imgs/cnn6.png" width="80%" />
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# The value is much lower! This is because there wasn’t anything in the image section that responded to the curve detector filter. Remember, the output of this conv layer is an activation map.
#
# + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"}
# # Going Deeper Through the Network
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Now in a traditional **convolutional neural network** architecture, there are other layers that are interspersed between these conv layers.
#
# <img src="https://adeshpande3.github.io/assets/Table.png"/>
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## ReLU (Rectified Linear Units) Layer
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# After each conv layer, it is convention to apply a *nonlinear layer* (or **activation layer**) immediately afterward.
#
#
# The purpose of this layer is to introduce nonlinearity to a system that basically has just been computing linear operations during the conv layers (just element wise multiplications and summations)
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# In the past, nonlinear functions like tanh and sigmoid were used, but researchers found out that **ReLU layers** work far better because the network is able to train a lot faster (because of the computational efficiency) without making a significant difference to the accuracy.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# It also helps to alleviate the **vanishing gradient problem**, which is the issue where the lower layers of the network train very slowly because the gradient decreases exponentially through the layers
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# (**very briefly**)
#
# Vanishing gradient problem depends on the choice of the activation function.
#
# Many common activation functions (e.g `sigmoid` or `tanh`) *squash* their input into a very small output range in a very non-linear fashion.
#
# For example, sigmoid maps the real number line onto a "small" range of [0, 1].
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# As a result, there are large regions of the input space which are mapped to an extremely small range.
#
# In these regions of the input space, even a large change in the input will produce a small change in the output - hence the **gradient is small**.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ### ReLu
#
# The **ReLu** function is defined as $f(x) = \max(0, x),$ [2]
#
# A smooth approximation to the rectifier is the *analytic function*: $f(x) = \ln(1 + e^x)$
#
# which is called the **softplus** function.
#
# The derivative of softplus is $f'(x) = e^x / (e^x + 1) = 1 / (1 + e^{-x})$, i.e. the **logistic function**.
#
# [2] [http://www.cs.toronto.edu/~fritz/absps/reluICML.pdf]() by <NAME>
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Pooling Layers
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# After some ReLU layers, it is customary to apply a **pooling layer** (aka *downsampling layer*).
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# In this category, there are also several layer options, with **maxpooling** being the most popular.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Example of a MaxPooling filter
# + [markdown] deletable=true editable=true
# <img src="imgs/MaxPool.png" width="80%" />
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# Other options for pooling layers are average pooling and L2-norm pooling.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# The intuition behind this Pooling layer is that once we know that a specific feature is in the original input volume (there will be a high activation value), its exact location is not as important as its relative location to the other features.
#
# Therefore this layer drastically reduces the spatial dimension (the length and the width but not the depth) of the input volume.
#
# This serves two main purposes: reduce the amount of parameters; controlling overfitting.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# An intuitive explanation for the usefulness of pooling could be explained by an example:
#
# Let's assume that we have a filter that is used for detecting faces. The exact pixel location of the face is less relevant than the fact that there is a face "somewhere at the top".
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Dropout Layer
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# The **dropout layers** have the very specific function to *drop out* a random set of activations in that layers by setting them to zero in the forward pass. Simple as that.
#
# It allows to avoid *overfitting* but has to be used **only** at training time and **not** at test time.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Fully Connected Layer
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# The last layer, however, is an important one, namely the **Fully Connected Layer**.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# Basically, a FC layer looks at what high level features most strongly correlate to a particular class and has particular weights so that when you compute the products between the weights and the previous layer, you get the correct probabilities for the different classes.
# + [markdown] deletable=true editable=true
# <img src="imgs/ConvNet LeNet.png" width="90%" />
# -
# ## Going further: Convolution Arithmetic
# If you want to go further with Convolution and you want to fully understand how convolution works with all the details we omitted in this notebook, I strongly suggest to read this **terrific** paper: [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285).
#
# This paper is also referenced (with animations) in the `theano` main documentation: [convnet tutorial](http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html)
# ---
# + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"}
# # CNN in Keras
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# **Keras** has an extensive support for Convolutional Layers:
#
# - 1D Convolutional Layers;
# - 2D Convolutional Layers;
# - 3D Convolutional Layers;
# - Depthwise Convolution;
# - Transpose Convolution;
# - ....
#
# The corresponding `keras` package is `keras.layers.convolutional`.
#
# Take a look at the [Convolutional Layers](https://keras.io/layers/convolutional/) documentation to know more about Conv Layers that are missing in this notebook.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# #### Convolution1D
#
# ```python
# from keras.layers.convolutional import Conv1D
#
# Conv1D(filters, kernel_size, strides=1, padding='valid',
# dilation_rate=1, activation=None, use_bias=True,
# kernel_initializer='glorot_uniform', bias_initializer='zeros',
# kernel_regularizer=None, bias_regularizer=None,
# activity_regularizer=None, kernel_constraint=None,
# bias_constraint=None)
# ```
#
# #### Arguments:
#
# <ul>
# <li><strong>filters</strong>: Integer, the dimensionality of the output space
# (i.e. the number of output filters in the convolution).</li>
# <li><strong>kernel_size</strong>: An integer or tuple/list of a single integer,
# specifying the length of the 1D convolution window.</li>
# <li><strong>strides</strong>: An integer or tuple/list of a single integer,
# specifying the stride length of the convolution.
# Specifying any stride value != 1 is incompatible with specifying
# any <code>dilation_rate</code> value != 1.</li>
# <li><strong>padding</strong>: One of <code>"valid"</code>, <code>"causal"</code> or <code>"same"</code> (case-insensitive).
# <code>"causal"</code> results in causal (dilated) convolutions, e.g. output[t]
# does not depend on input[t+1:]. Useful when modeling temporal data
# where the model should not violate the temporal order.
# See <a href="https://arxiv.org/abs/1609.03499">WaveNet: A Generative Model for Raw Audio, section 2.1</a>.</li>
# <li><strong>dilation_rate</strong>: an integer or tuple/list of a single integer, specifying
# the dilation rate to use for dilated convolution.
# Currently, specifying any <code>dilation_rate</code> value != 1 is
# incompatible with specifying any <code>strides</code> value != 1.</li>
# <li><strong>activation</strong>: Activation function to use
# (see <a href="https://keras.io/activations/">activations</a>).
# If you don't specify anything, no activation is applied
# (ie. "linear" activation: <code>a(x) = x</code>).</li>
# <li><strong>use_bias</strong>: Boolean, whether the layer uses a bias vector.</li>
# <li><strong>kernel_initializer</strong>: Initializer for the <code>kernel</code> weights matrix
# (see <a href="https://keras.io/initializers/">initializers</a>).</li>
# <li><strong>bias_initializer</strong>: Initializer for the bias vector
# (see <a href="https://keras.io/initializers/">initializers</a>).</li>
# <li><strong>kernel_regularizer</strong>: Regularizer function applied to
# the <code>kernel</code> weights matrix
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>bias_regularizer</strong>: Regularizer function applied to the bias vector
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>activity_regularizer</strong>: Regularizer function applied to
# the output of the layer (its "activation").
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>kernel_constraint</strong>: Constraint function applied to the kernel matrix
# (see <a href="https://keras.io/constraints/">constraints</a>).</li>
# <li><strong>bias_constraint</strong>: Constraint function applied to the bias vector
# (see <a href="https://keras.io/constraints/">constraints</a>).</li>
# </ul>
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# >Convolution operator for filtering neighborhoods of **one-dimensional inputs**. When using this layer as the first layer in a model, either provide the keyword argument `input_dim` (int, e.g. 128 for sequences of 128-dimensional vectors), or `input_shape` (tuple of integers, e.g. (10, 128) for sequences of 10 vectors of 128-dimensional vectors).
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# #### Example
#
# ```python
#
# # apply a convolution 1d of length 3 to a sequence with 10 timesteps,
# # with 64 output filters
# model = Sequential()
# model.add(Conv1D(64, 3, padding='same', input_shape=(10, 32)))
# # now model.output_shape == (None, 10, 64)
#
# # add a new conv1d on top
# model.add(Conv1D(32, 3, padding='same'))
# # now model.output_shape == (None, 10, 32)
# ```
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# #### Convolution2D
#
# ```python
# from keras.layers.convolutional import Conv2D
#
# Conv2D(filters, kernel_size, strides=(1, 1), padding='valid',
# data_format=None, dilation_rate=(1, 1), activation=None,
# use_bias=True, kernel_initializer='glorot_uniform',
# bias_initializer='zeros', kernel_regularizer=None,
# bias_regularizer=None, activity_regularizer=None,
# kernel_constraint=None, bias_constraint=None)
# ```
#
# #### Arguments:
#
# <ul>
# <li><strong>filters</strong>: Integer, the dimensionality of the output space
# (i.e. the number of output filters in the convolution).</li>
# <li><strong>kernel_size</strong>: An integer or tuple/list of 2 integers, specifying the
# width and height of the 2D convolution window.
# Can be a single integer to specify the same value for
# all spatial dimensions.</li>
# <li><strong>strides</strong>: An integer or tuple/list of 2 integers,
# specifying the strides of the convolution along the width and height.
# Can be a single integer to specify the same value for
# all spatial dimensions.
# Specifying any stride value != 1 is incompatible with specifying
# any <code>dilation_rate</code> value != 1.</li>
# <li><strong>padding</strong>: one of <code>"valid"</code> or <code>"same"</code> (case-insensitive).</li>
# <li><strong>data_format</strong>: A string,
# one of <code>channels_last</code> (default) or <code>channels_first</code>.
# The ordering of the dimensions in the inputs.
# <code>channels_last</code> corresponds to inputs with shape
# <code>(batch, height, width, channels)</code> while <code>channels_first</code>
# corresponds to inputs with shape
# <code>(batch, channels, height, width)</code>.
# It defaults to the <code>image_data_format</code> value found in your
# Keras config file at <code>~/.keras/keras.json</code>.
# If you never set it, then it will be "channels_last".</li>
# <li><strong>dilation_rate</strong>: an integer or tuple/list of 2 integers, specifying
# the dilation rate to use for dilated convolution.
# Can be a single integer to specify the same value for
# all spatial dimensions.
# Currently, specifying any <code>dilation_rate</code> value != 1 is
# incompatible with specifying any stride value != 1.</li>
# <li><strong>activation</strong>: Activation function to use
# (see <a href="https://keras.io/activations/">activations</a>).
# If you don't specify anything, no activation is applied
# (ie. "linear" activation: <code>a(x) = x</code>).</li>
# <li><strong>use_bias</strong>: Boolean, whether the layer uses a bias vector.</li>
# <li><strong>kernel_initializer</strong>: Initializer for the <code>kernel</code> weights matrix
# (see <a href="https://keras.io/initializers/">initializers</a>).</li>
# <li><strong>bias_initializer</strong>: Initializer for the bias vector
# (see <a href="https://keras.io/initializers/">initializers</a>).</li>
# <li><strong>kernel_regularizer</strong>: Regularizer function applied to
# the <code>kernel</code> weights matrix
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>bias_regularizer</strong>: Regularizer function applied to the bias vector
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>activity_regularizer</strong>: Regularizer function applied to
# the output of the layer (its "activation").
# (see <a href="https://keras.io/regularizers/">regularizer</a>).</li>
# <li><strong>kernel_constraint</strong>: Constraint function applied to the kernel matrix
# (see <a href="https://keras.io/constraints/">constraints</a>).</li>
# <li><strong>bias_constraint</strong>: Constraint function applied to the bias vector
# (see <a href="https://keras.io/constraints/">constraints</a>).</li>
# </ul>
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# #### Example
# Assuming
# ``keras.backend.image_data_format == "channels_last"``
# ```python
#
# # apply a 3x3 convolution with 64 output filters on a 256x256 image:
# model = Sequential()
# model.add(Conv2D(64, (3, 3), padding='same',
#                  input_shape=(256, 256, 3)))
# # now model.output_shape == (None, 256, 256, 64)
#
# # add a 3x3 convolution on top, with 32 output filters:
# model.add(Conv2D(32, (3, 3), padding='same'))
# # now model.output_shape == (None, 256, 256, 32)
#
# ```
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ## Dimensions of Conv filters in Keras
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# The complex structure of ConvNets *may* lead to a representation that is challenging to understand.
# + [markdown] deletable=true editable=true slideshow={"slide_type": "fragment"}
# Of course, the dimensions vary according to the dimension of the Convolutional filters (e.g. 1D, 2D)
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ### Convolution1D
#
# **Input Shape**:
#
# **3D** tensor with shape: (`batch_size`, `steps`, `input_dim`).
#
# **Output Shape**:
#
# **3D** tensor with shape: (`batch_size`, `new_steps`, `filters`).
# + [markdown] deletable=true editable=true slideshow={"slide_type": "subslide"}
# ### Convolution2D
#
# **Input Shape**:
#
# **4D** tensor with shape:
#
# - (`batch_size`, `channels`, `rows`, `cols`) if `image_data_format='channels_first'`
# - (`batch_size`, `rows`, `cols`, `channels`) if `image_data_format='channels_last'`
#
# **Output Shape**:
#
# **4D** tensor with shape:
#
# - (`batch_size`, `filters`, `new_rows`, `new_cols`)
# if `image_data_format='channels_first'`
# - (`batch_size`, `new_rows`, `new_cols`, `filters`) if `image_data_format='channels_last'`
# -
| 2.1 Convolutional Neural Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="KDmXsEGhauan"
#
#
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/CLASSIFICATION_EN_SPAM.ipynb)
#
#
#
# + [markdown] colab_type="text" id="UChknajcavFm"
# # **Detect Spam messages**
# + [markdown] colab_type="text" id="n_Zz-kwQa7Ez"
# ## 1. Colab Setup
# + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" executionInfo={"elapsed": 72700, "status": "ok", "timestamp": 1597072416524, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="sIJfXkK54WFk" outputId="87532ba1-e03b-4530-b388-bb9fde9b5407"
# Install java
# !apt-get update -qq
# !apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# !java -version
# Install pyspark
# !pip install --ignore-installed -q pyspark==2.4.4
# Install Sparknlp
# !pip install --ignore-installed spark-nlp
# + colab={} colab_type="code" executionInfo={"elapsed": 1053, "status": "ok", "timestamp": 1597072475126, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="v29AZ9XO5AhU"
import pandas as pd
import numpy as np
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
import json
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
# + [markdown] colab_type="text" id="pqorlWy9a9pF"
# ## 2. Start Spark Session
# + colab={} colab_type="code" executionInfo={"elapsed": 31538, "status": "ok", "timestamp": 1597072506683, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="sI-CZ9PO5GW9"
spark = sparknlp.start()
# + [markdown] colab_type="text" id="0JaFJmC_bD04"
# ## 3. Select the DL model
# + colab={} colab_type="code" executionInfo={"elapsed": 29265, "status": "ok", "timestamp": 1597072506684, "user": {"displayName": "<NAME>aq", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="Y6s6ljDsH9ZK"
### Select Model
model_name = 'classifierdl_use_spam'
# + [markdown] colab_type="text" id="aGttu2LqbAIn"
# ## 4. Some sample examples
# + colab={} colab_type="code" executionInfo={"elapsed": 27988, "status": "ok", "timestamp": 1597072506684, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="WGFzBK1EX8wm"
text_list=[
"""Hiya do u like the hlday pics looked horrible in them so took mo out! Hows the camp Amrca thing? Speak soon Serena:)""",
"""U have a secret admirer who is looking 2 make contact with U-find out who they R*reveal who thinks UR so special-call on 09058094594""",]
# + [markdown] colab_type="text" id="BcE65Pc0bGPO"
# ## 5. Define Spark NLP pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 120370, "status": "ok", "timestamp": 1597072602135, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="K2CS_jdi5Phc" outputId="a92c706d-df06-4588-d443-997a2ece7146"
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
use = UniversalSentenceEncoder.pretrained(lang="en") \
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
document_classifier = ClassifierDLModel.pretrained(model_name)\
.setInputCols(['document', 'sentence_embeddings']).setOutputCol("class")
nlpPipeline = Pipeline(stages=[
documentAssembler,
use,
document_classifier
])
# + [markdown] colab_type="text" id="n1wPffwfbKNb"
# ## 6. Run the pipeline
# + colab={} colab_type="code" executionInfo={"elapsed": 122433, "status": "ok", "timestamp": 1597072605480, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="RZxkeqNpbR02"
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({"text":text_list}))
result = pipelineModel.transform(df)
# + [markdown] colab_type="text" id="7nvJc6dwbX9X"
# ## 7. Visualize results
# + colab={"base_uri": "https://localhost:8080/", "height": 156} colab_type="code" executionInfo={"elapsed": 125234, "status": "ok", "timestamp": 1597072609459, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10508284328555930330"}, "user_tz": -300} id="P84W1Z4uPI_b" outputId="1debc8b6-a888-40f6-c1d5-6231cf78db99"
result.select(F.explode(F.arrays_zip('document.result', 'class.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("document"),
F.expr("cols['1']").alias("class")).show(truncate=False)
| tutorials/streamlit_notebooks/CLASSIFICATION_EN_SPAM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # One Pixel Attack Tutorial
# ## Part 1 - Cifar10
#
# ### <NAME>
# ### February 3, 2019
# 
# This notebook will demonstrate the one pixel attack with a few different convolutional neural network models. By using differential evolution, we find a special pixel that can modify a target image such that the network misclassifies the image (which it previously correctly classified).
#
# In theory, we want models that don't get fooled by such tiny changes. Especially in images, it is undesirable to have a small alteration in the input result in a drastic change in the output. However, even the most accurate neural networks are susceptible to this type of attack.
#
# To read more about it, see [the original paper](https://arxiv.org/abs/1710.08864), or the authors' [official repo](https://github.com/Carina02/One-Pixel-Attack).
#
# Let's get started.
# ## Imports
# Ensure that you have `numpy`, `pandas`, `scipy`, `matplotlib`, `tensorflow-gpu`, and `keras` installed.
#
# Alternatively, you may [run this notebook in Google Colab](https://colab.research.google.com/drive/1Zq1kGP9C7i-70-SXyuEEaqYngtyQZMn7). Note: colab allows you to run this notebook on GPU, free of charge. Simply select "GPU" in the Accelerator drop-down in Notebook Settings (either through the Edit menu or the command palette at cmd/ctrl-shift-P).
# +
# If running in Google Colab, import files
try:
import google.colab
in_colab = True
except:
in_colab = False
if in_colab:
# !git clone https://github.com/Hyperparticle/one-pixel-attack-keras.git
# !mv -v one-pixel-attack-keras/* .
# !rm -rf one-pixel-attack-keras
# Python Libraries
# %matplotlib inline
import pickle
import numpy as np
import pandas as pd
import matplotlib
from keras.datasets import cifar10
from keras import backend as K
# Custom Networks
from networks.lenet import LeNet
from networks.pure_cnn import PureCnn
from networks.network_in_network import NetworkInNetwork
from networks.resnet import ResNet
from networks.densenet import DenseNet
from networks.wide_resnet import WideResNet
from networks.capsnet import CapsNet
# Helper functions
from differential_evolution import differential_evolution
import helper
matplotlib.style.use('ggplot')
np.random.seed(100)
# -
# ## Load Dataset
# For this attack, we will use the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) packaged by Keras. The task of the dataset is to correctly classify a 32x32 pixel image in 1 of 10 categories (e.g., bird, deer, truck).
#
# The code below will load the Cifar10 dataset. Keras will need to download the dataset if it is not cached locally already.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# We can access and display any image in the dataset by its index. For instance, here is a horse.
image = 99 # Image index in the test set
helper.plot_image(x_test[image])
# ## Image Perturbation
# To begin, we need a function to modify one or more pixels in an image.
#
# We can define the perturbation of a pixel as a 5-tuple
#
# $$\textbf{x} = (x, y, r, g, b)$$
#
# where $x, y$ are the coordinates of the pixel from 0 to 31, and $r,g,b$ are the red, green, and blue values from 0 to 255. Then multiple perturbations can simply be a concatenation of these tuples:
#
# $$X = (x_1, y_1, r_1, g_1, b_1, x_2, y_2, r_2, g_2, b_2, ...)$$
#
# We could instead use an array of tuples, but the optimization algorithm we will use requires it to be a flat 1-d vector.
#
# Then the function to perturb an image can take as an input the image and $X$, and output a copy of the image with each pixel at $x_i, y_i$ modified to have the color $r_i, g_i, b_i$. To speed up computation, we will batch together an array of $X$ perturbations, denoted $X_S$.
def perturb_image(xs, img):
# If this function is passed just one perturbation vector,
# pack it in a list to keep the computation the same
if xs.ndim < 2:
xs = np.array([xs])
# Copy the image n == len(xs) times so that we can
# create n new perturbed images
tile = [len(xs)] + [1]*(xs.ndim+1)
imgs = np.tile(img, tile)
# Make sure to floor the members of xs as int types
xs = xs.astype(int)
for x,img in zip(xs, imgs):
# Split x into an array of 5-tuples (perturbation pixels)
# i.e., [[x,y,r,g,b], ...]
pixels = np.split(x, len(x) // 5)
for pixel in pixels:
# At each pixel's x,y position, assign its rgb value
x_pos, y_pos, *rgb = pixel
img[x_pos, y_pos] = rgb
return imgs
# Now we can modify the pixels of any image we want.
#
# Let's modify our horse image by making pixel (16,16) yellow.
# +
image = 99 # Image index in the test set
pixel = np.array([16, 16, 255, 255, 0]) # pixel = x,y,r,g,b
image_perturbed = perturb_image(pixel, x_test[image])[0]
helper.plot_image(image_perturbed)
# -
# ## Load Models
# To demonstrate the attack, we need some neural network models trained on the Cifar10 dataset. We will now load some pretrained models, which can be found in the `networks/models` directory.
#
# It is recommended to use Keras with a GPU enabled. If you're [running in Google Colab](https://colab.research.google.com/drive/1Zq1kGP9C7i-70-SXyuEEaqYngtyQZMn7), you can enable a GPU instance by selecting `Runtime > Change runtime type > Hardware accelerator > GPU` (you will need to re-run all cells). The code below can be used to check (if using TensorFlow).
# Should output /device:GPU:0
K.tensorflow_backend._get_available_gpus()
# There are two models included in this repository, `lenet` and `resnet` which will be loaded from disk automatically.
#
# Optionally, you may [download the larger, more accurate models](https://www.dropbox.com/sh/dvatkpjl0sn79kn/AAC9L4puJ_sdFUkDZfr5SFkLa?dl=0) (e.g., Capsule Network, DenseNet, etc.). Make sure to copy the models into the `networks/models/` directory. Then uncomment the lines below and run the cell to load the models of your choosing.
# +
lenet = LeNet()
resnet = ResNet()
models = [lenet, resnet]
## Uncomment below to load more models to play with. Make sure the model files exist by training or downloading them.
# lenet = LeNet()
# pure_cnn = PureCnn()
# net_in_net = NetworkInNetwork()
# resnet = ResNet()
# densenet = DenseNet()
# wide_resnet = WideResNet()
# capsnet = CapsNet()
# models = [lenet, pure_cnn, net_in_net, resnet, densenet, wide_resnet, capsnet]
# -
# Note that there are even more networks available in the `networks` directory, but must be trained before loading them here.
# ### Calculate Model Accuracies
# After loading the models, we would like to evaluate all test images with each model to ensure that we only attack the images which have been classified correctly. The code below will also display the accuracy and number of parameters of each model.
# +
network_stats, correct_imgs = helper.evaluate_models(models, x_test, y_test)
correct_imgs = pd.DataFrame(correct_imgs, columns=['name', 'img', 'label', 'confidence', 'pred'])
network_stats = pd.DataFrame(network_stats, columns=['name', 'accuracy', 'param_count'])
network_stats
# -
# ### Prediction Function
# For the black-box attack, all we should care about is the inputs to the model (the images), and the outputs of the model (the prediction probabilities). No special information about the model is required; we could even swap it with a model that is not a neural network.
#
# Define a function that runs several perturbed images on a given model and returns the model's confidence (probability output) in the target class, one confidence value per image. If the target class is the correct class, this will be the function that we want to minimize so that the model will be most confident in another class (which is incorrect). Otherwise, the target is an incorrect class and we will want to maximize it.
def predict_classes(xs, img, target_class, model, minimize=True):
    """Score perturbations by the model's confidence in *target_class*.

    Applies each candidate perturbation in *xs* to *img*, runs the batch
    through *model*, and returns one confidence value per perturbation.
    Because the optimizer always minimizes, the score is complemented
    (1 - p) when the caller actually wants to maximize the target class.
    """
    perturbed_batch = perturb_image(xs, img)
    target_confidence = model.predict(perturbed_batch)[:, target_class]
    if minimize:
        return target_confidence
    return 1 - target_confidence
# Below we can modify a pixel in an image and see how the confidence of the model changes. In almost all cases, the confidence will not change. However, for very special cases it will change drastically.
# +
# Demo: perturb one pixel of test image 384 and compare the model's confidence
# in the true class before and after.
image = 384
# Perturbation vector layout: (x, y, r, g, b) — position 16,13 set to RGB (25,48,156).
pixel = np.array([16, 13, 25, 48, 156])
model = resnet
true_class = y_test[image,0]
prior_confidence = model.predict_one(x_test[image])[true_class]
confidence = predict_classes(pixel, x_test[image], true_class, model)[0]
print('Confidence in true class', class_names[true_class], 'is', confidence)
print('Prior confidence was', prior_confidence)
helper.plot_image(perturb_image(pixel, x_test[image])[0])
# -
# ## The Attack
# Here we will demonstrate two variants of the one pixel attack: untargeted and targeted.
# ### Targeted vs. Untargeted Attacks
# The objective of an untargeted attack is to cause a model to misclassify an image. This means we want to perturb an image as to minimize the confidence probability of the correct classification category and maximize the sum of the probabilities of all other categories.
#
# The objective of a targeted attack is to cause a model to classify an image as a given target class. We want to perturb an image as to maximize the probability of a class of our own choosing.
# ### Success Criterion
# Define a function so that whenever a given perturbation is sufficient to fool a model, it returns `True`. This will be called the success criterion. The `targeted_attack` boolean flag will indicate whether success means maximization of the target class or minimization of the correct (target) class.
def attack_success(x, img, target_class, model, targeted_attack=False, verbose=False):
    """Success criterion for the attack on test image index *img*.

    Untargeted: success means the model's top class differs from
    *target_class* (the true label). Targeted: success means the top
    class equals *target_class*. Returns True on success; otherwise
    deliberately falls through to None rather than False, because of
    how Scipy's differential-evolution callback interprets the return.
    """
    perturbed = perturb_image(x, x_test[img])
    probabilities = model.predict(perturbed)[0]
    top_class = np.argmax(probabilities)
    if verbose:
        print('Confidence:', probabilities[target_class])
    fooled = (top_class == target_class) if targeted_attack \
        else (top_class != target_class)
    if fooled:
        return True
# Here we demonstrate the usage of the success criterion function. It's nearly identical to `predict_class()` as before, but also decides the success of the attack. For purposes of demonstration we assume an untargeted attack.
# +
# Demo of the success criterion on test image 541 (untargeted attack assumed).
image = 541
# (x, y, r, g, b) perturbation to test.
pixel = np.array([17, 18, 185, 36, 215])
model = resnet
true_class = y_test[image,0]
prior_confidence = model.predict_one(x_test[image])[true_class]
success = attack_success(pixel, image, true_class, model, verbose=True)
print('Prior confidence', prior_confidence)
# `success == True` because attack_success returns True or None (never False).
print('Attack success:', success == True)
helper.plot_image(perturb_image(pixel, x_test[image])[0])
# -
# ### Attack Function
# Finally, we arrive at the attack itself: how do we find the pixels that will result in a successful attack? First, formulate it as an optimization problem: in an untargeted attack, minimize the confidence of the correct class, and in a targeted attack, maximize the confidence of a target class. This is precisely our `predict_class` function.
#
# When performing black-box optimizations such as the one pixel attack, it can be very difficult to find an efficient gradient-based optimization that will work for the problem. It would be nice to use an optimization algorithm that can find good solutions without relying on the smoothness of the function. In our case, we have discrete integer positions ranging from 0 to 31 and color intensities from 0 to 255, so the function is expected to be jagged.
#
# For that, we use an algorithm called [differential evolution](https://en.wikipedia.org/wiki/Differential_evolution). Here's an example of differential evolution optimizing the [Ackley function](https://en.wikipedia.org/wiki/Ackley_function) (if you're using Google Colab, run the code cell below):
#
# <br>
#
# 
#
# <br>
#
# Differential evolution is a type of evolutionary algorithm where a population of candidate solutions generate offspring which compete with the rest of the population each generation according to their fitness. Each candidate solution is represented by a vector of real numbers which are the inputs to the function we would like to minimize. The lower the output of this function, the better the fitness. The algorithm works by initializing a (usually random) population of vectors, generating new offspring vectors by combining (mutating) individuals in the population, and replacing worse-performing individuals with better candidates.
#
# In the context of the one pixel attack, our input will be a flat vector of pixel values:
#
# $$X = (x_1, y_1, r_1, g_1, b_1, x_2, y_2, r_2, g_2, b_2, ...)$$
#
# These will be encoded as floating-point values, but will be floored back into integers to calculate image perturbations. First we generate a random population of $n$ perturbations
#
# $$\textbf{P} = (X_1, X_2, \dots, X_n)$$
#
# Then, on each iteration we calculate $n$ new mutant children using the formula
#
# $$X_i = X_{r1} + F (X_{r2} - X_{r3})$$
#
# such that
#
# $$r1 \neq r2 \neq r3$$
#
# where $r1,r2,r3$ are random indices into our population $\textbf{P}$, and $F = 0.5$ is a mutation parameter. Basically, we pick 3 random individuals from the previous generation and recombine them to make a new candidate solution. If this candidate $X_i$ gives a lower minimum at position $i$ (i.e., the attack is closer to success), replace the old $X_i$ with this new one. This process repeats for several iterations until our stopping criterion, `attack_success`, which is when we find an image that successfully completes the attack.
#
# <br>
#
# See [this excellent tutorial post](https://pablormier.github.io/2017/09/05/a-tutorial-on-differential-evolution-with-python/) on how differential evolution works in greater detail.
#
# We will use a [slight modification](differential_evolution.py) of [Scipy's implementation of differential evolution](https://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.differential_evolution.html) to utilize GPU parallelism by batching predictions together.
# + colab={} colab_type="code" id="tlO00HlxbB0k"
# Run this cell if you are using Google Colab to see the Ackley GIF
# Display the Ackley-function animation inline when running on Google Colab.
if in_colab:
    from IPython.display import Image
    with open('images/Ackley.gif','rb') as file:
        display(Image(file.read()))
# + colab={} colab_type="code" id="7rlzqXpqSgrW"
def attack(img, model, target=None, pixel_count=1,
           maxiter=75, popsize=400, verbose=False):
    """Run a one-pixel attack on test image index *img* against *model*.

    target: class index for a targeted attack, or None for untargeted.
    pixel_count: number of pixels the optimizer may perturb.
    Returns a stats row: [model name, pixel count, image index, actual class,
    predicted class, success flag, confidence drop, prior probs, new probs,
    best perturbation vector].
    """
    # Change the target class based on whether this is a targeted attack or not
    targeted_attack = target is not None
    target_class = target if targeted_attack else y_test[img,0]
    # Define bounds for a flat vector of x,y,r,g,b values
    # For more pixels, repeat this layout
    bounds = [(0,32), (0,32), (0,256), (0,256), (0,256)] * pixel_count
    # Population multiplier, in terms of the size of the perturbation vector x
    # (scipy's `popsize` is a multiplier of len(bounds), so the effective
    # population is roughly `popsize` individuals).
    popmul = max(1, popsize // len(bounds))
    # Format the predict/callback functions for the differential evolution algorithm.
    # Untargeted (target is None) => minimize confidence in the true class.
    predict_fn = lambda xs: predict_classes(
        xs, x_test[img], target_class, model, target is None)
    callback_fn = lambda x, convergence: attack_success(
        x, img, target_class, model, targeted_attack, verbose)
    # Call Scipy's Implementation of Differential Evolution.
    # atol=-1 disables the convergence-based stop, so only the callback
    # (attack success) or maxiter terminates the search; polish=False skips
    # the gradient-based refinement, which is pointless on this jagged function.
    attack_result = differential_evolution(
        predict_fn, bounds, maxiter=maxiter, popsize=popmul,
        recombination=1, atol=-1, callback=callback_fn, polish=False)
    # Calculate some useful statistics to return from this function
    attack_image = perturb_image(attack_result.x, x_test[img])[0]
    prior_probs = model.predict_one(x_test[img])
    predicted_probs = model.predict_one(attack_image)
    predicted_class = np.argmax(predicted_probs)
    actual_class = y_test[img,0]
    # Success here is always "misclassified", even for targeted attacks.
    success = predicted_class != actual_class
    cdiff = prior_probs[actual_class] - predicted_probs[actual_class]
    # Show the best attempt at a solution (successful or not)
    helper.plot_image(attack_image, actual_class, class_names, predicted_class)
    return [model.name, pixel_count, img, actual_class, predicted_class, success, cdiff, prior_probs, predicted_probs, attack_result.x]
# -
# #### Untargeted Attack
# Let's look at one iteration of the untargeted attack. Here we will demonstrate a successful attack an image of a frog with the `resnet` model. We should see the confidence in the true class drop after several iterations.
#
# Try to see if you can successfully attack other images/models. The more pixels we are allowed to modify, the more likely it is we are to find a solution for any given image.
# +
image = 102
pixels = 1 # Number of pixels to attack
model = resnet
_ = attack(image, model, pixel_count=pixels, verbose=True)
# -
# #### Targeted Attack
# In the targeted attack, we can choose which class we want a model to classify an image as. The task is much harder for the targeted attack, as we constrain the misclassification to a given class rather than any class that's not the correct one. We should see the confidence in the target class rise after several iterations.
#
# Below we try to cause the `lenet` to classify an image of a `ship` as an `automobile`. Try to change the parameters and see what happens.
# +
image = 108
target_class = 1 # Integer in range 0-9
pixels = 3
model = lenet
print('Attacking with target', class_names[target_class])
_ = attack(image, model, target_class, pixel_count=pixels, verbose=True)
# -
# ### Collect Results
# Armed with all the necessary tools to conduct a one pixel attack, the final step is to collect relevant statistics on the targeted and untargeted attack. The relevant data points are what percentage of images were we able to successfully attack for a given model, and how the number of pixels affect this percentage.
#
# We will loop through every combination of all models, perturbations of 1,3,5 pixels, images, and target classes (for the targeted attack). This will take a lot of computational resources and time, so [skip to the statistics section](#Attack-Statistics) if that's not your idea of fun.
def attack_all(models, samples=500, pixels=(1,3,5), targeted=False,
               maxiter=75, popsize=400, verbose=False):
    """Attack *samples* correctly-classified images per model, for each pixel count.

    For a targeted run, every non-true class (0-9) is attacked per image.
    Results are checkpointed after each model via helper.checkpoint.
    Returns the accumulated list of per-attack stats rows from attack().
    """
    results = []
    for model in models:
        model_results = []
        # Only attack images this model already classifies correctly.
        valid_imgs = correct_imgs[correct_imgs.name == model.name].img
        img_samples = np.random.choice(valid_imgs, samples, replace=False)
        for pixel_count in pixels:
            for i,img in enumerate(img_samples):
                print(model.name, '- image', img, '-', i+1, '/', len(img_samples))
                targets = [None] if not targeted else range(10)
                for target in targets:
                    if (targeted):
                        print('Attacking with target', class_names[target])
                        # Skip the true class: "targeting" it is meaningless.
                        if (target == y_test[img,0]):
                            continue
                    result = attack(img, model, target, pixel_count,
                                    maxiter=maxiter, popsize=popsize,
                                    verbose=verbose)
                    model_results.append(result)
        results += model_results
        helper.checkpoint(results, targeted)
    return results
# Run both attack campaigns (these take a long time; results are checkpointed).
untargeted = attack_all(models, samples=100, targeted=False)
targeted = attack_all(models, samples=10, targeted=True)
# ### Attack Statistics
# Print the final results!
# Load the results (from the checkpoint files written by helper.checkpoint —
# note this overwrites the in-memory results computed above).
untargeted, targeted = helper.load_results()
columns = ['model', 'pixels', 'image', 'true', 'predicted', 'success', 'cdiff', 'prior_probs', 'predicted_probs', 'perturbation']
untargeted_results = pd.DataFrame(untargeted, columns=columns)
targeted_results = pd.DataFrame(targeted, columns=columns)
# #### Untargeted
helper.attack_stats(untargeted_results, models, network_stats)
# #### Targeted
helper.attack_stats(targeted_results, models, network_stats)
# ### Show some successful attacks
# Plot 9 random successful attack images
print('Untargeted Attack')
helper.visualize_attack(untargeted_results, class_names)
print('Targeted Attack')
helper.visualize_attack(targeted_results, class_names)
# ## Conclusions
# It appears that the accuracy of a model is not strongly correlated with the chance of performing a successful attack on an image. Perhaps surprisingly, the purely convolutional model is the most resistant CNN to these types of attacks. In addition, the capsule network CapsNet has the lowest attack success rate out of all the models, although it is still vulnerable to attack.
#
# [Part 2](1_one-pixel-attack-cifar10.ipynb)
# ## Credits
# - This implemenation is based off of the original paper describing the one pixel attack: https://arxiv.org/abs/1710.08864
# - Base code for iPython notebook: https://github.com/09rohanchopra/cifar10
# - Keras Cifar10 models: https://github.com/BIGBALLON/cifar-10-cnn
# - Scipy's differential evolution implementation: https://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.differential_evolution.html
# - State of the art: https://github.com/RedditSota/state-of-the-art-result-for-machine-learning-problems
# - CapsNet Keras: https://github.com/XifengGuo/CapsNet-Keras
# - CapsNet with Cifar: https://github.com/theblackcat102/dynamic-routing-capsule-cifar
| 1_one-pixel-attack-cifar10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="AYV_dMVDxyc2"
# [](https://github.com/lab-ml/nn)
# [](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/gan/original/experiment.ipynb)
#
# ## DCGAN
#
# This is an experiment training DCGAN model.
# + [markdown] id="AahG_i2y5tY9"
# Install the `labml-nn` package
# + id="ZCzmCrAIVg0L" colab={"base_uri": "https://localhost:8080/"} outputId="2fe2685f-731c-4c47-854e-a4f00e485281"
# Install the labml-nn package (Colab shell escape via jupytext).
# !pip install labml-nn
# + [markdown] id="SE2VUQ6L5zxI"
# Imports
# + id="0hJXx_g0wS2C"
from labml import experiment
from labml_nn.gan.original.experiment import Configs
# + [markdown] id="Lpggo0wM6qb-"
# Create an experiment
# + id="bFcr9k-l4cAg"
experiment.create(name="mnist_gan")
# + [markdown] id="-OnHLi626tJt"
# Initialize configurations
# + id="Piz0c5f44hRo"
conf = Configs()
# + [markdown] id="wwMzCqpD6vkL"
# Set experiment configurations and assign a configurations dictionary to override configurations
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="e6hmQhTw4nks" outputId="4be767af-0ebd-4c35-8da0-0e532495e037"
# Only label_smoothing is overridden; all other Configs fields keep their defaults.
experiment.configs(conf,
                   {'label_smoothing': 0.01})
# + [markdown] id="KJZRf8527GxL"
# Start the experiment and run the training loop.
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="aIAWo7Fw5DR8" outputId="e3b02247-8ff9-47b5-8f52-49c9e3b8377f"
with experiment.start():
    conf.run()
# + id="oBXXlP2b7XZO"
| labml_nn/gan/original/experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="581fc77f5ca382c167ca97908228cd6370befc05"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import re
# Silences ALL warnings for the rest of the session (typical Kaggle style,
# but note it also hides pandas chained-assignment warnings below).
warnings.filterwarnings('ignore')
# + _uuid="ab46fee75c1d2ffba761cc51b69dc196cb67dffd"
import os
print(os.listdir("../input"))
# + _uuid="2051644e6a38ccd345c29da588473e563312c03e"
train = pd.read_excel('../input/Final_Train.xlsx')
test = pd.read_excel('../input/Final_Test.xlsx')
# + _uuid="5fc2d27a1a7813960efccbf88e912a22475951c6"
train.head()
# + _uuid="4dc48d75e4f2af058a9a166696cb5ac06cea5a2b"
train.info()
# + _uuid="a4c0ed787ff1c54c1c2e070fb7f7e3d8f73539e0"
# Cardinality of each candidate feature.
print('Qualification:', train['Qualification'].nunique())
print('Experience:', train['Experience'].nunique())
print('Rating:', train['Rating'].nunique())
print('Place:', train['Place'].nunique())
print('Profile', train['Profile'].nunique())
# + [markdown] _uuid="a1e0cebbdbb37954bdf9a81b1943b4b0f6647a17"
# #### Data Pre-processing
# + _uuid="639ef159f0f964b60c816fe3c94010c427f3b56a"
# Keep only the modeling columns; Fees (the target) only exists in train.
df_train = train[['Qualification', 'Profile', 'Experience', 'Place', 'Miscellaneous_Info', 'Rating','Fees']]
df_test = test[['Qualification', 'Profile','Experience', 'Place', 'Miscellaneous_Info', 'Rating']]
# + _uuid="f30679103a2c936b114bd8e8f2198ca3d7de4eef"
df_train.head()
# + _uuid="e348ad2c2bbc3ca979aae568c4f4df40bd65c18a"
# "NN years experience" -> integer years.
df_train['Experience'] = df_train['Experience'].str.replace('years experience','').astype(int)
df_test['Experience'] = df_test['Experience'].str.replace('years experience','').astype(int)
# + _uuid="2cb4ed32ace687b85a7ec84b7edfc1ccfd9baa24"
def clean_text(text):
    """Lowercase *text*, replace every non-letter with a space, squeeze spaces."""
    lowered = str(text).lower()
    letters_only = re.sub(r'[^a-z]', ' ', lowered)
    return re.sub(r'\s+', ' ', letters_only)
# Normalize qualification strings (letters only, single-spaced, lowercase).
df_train['Qualification'] = df_train['Qualification'].apply(clean_text)
df_test['Qualification'] = df_test['Qualification'].apply(clean_text)
def clean_place(text):
    """Lowercase *text*, keep only letters/digits, squeeze whitespace runs."""
    alphanumeric = re.sub(r'[^a-z0-9]', ' ', str(text).lower())
    return re.sub(r'\s+', ' ', alphanumeric)
# Missing locations become the literal token 'missing' before normalization.
df_train['Place'].fillna('missing', inplace=True)
df_test['Place'].fillna('missing', inplace=True)
df_train['Place'] = df_train['Place'].apply(clean_place)
df_test['Place'] = df_test['Place'].apply(clean_place)
# + _uuid="c71caa50abef7246b075590e0eff50d53054d6c5"
# Missing ratings are treated as 0% rather than imputed.
df_train['Rating'].fillna('0%', inplace=True)
df_test['Rating'].fillna('0%', inplace=True)
def clean_rating(text):
    """Drop the '%' sign from a rating value and return it as a string."""
    return str(text).replace('%', '')
# Strip '%' and cast ratings to integers (0-100).
df_train['Rating'] = df_train['Rating'].apply(clean_rating)
df_train['Rating'] = df_train['Rating'].astype(int)
df_test['Rating'] = df_test['Rating'].apply(clean_rating)
df_test['Rating'] = df_test['Rating'].astype(int)
# + _uuid="966234ad2ed6672b2f7177db9055b7614341d378"
# Missing free-text info becomes the token 'missing'.
df_train['Miscellaneous_Info'].fillna('missing', inplace=True)
df_test['Miscellaneous_Info'].fillna('missing', inplace=True)
def get_feedback(feedback):
    """Extract the first '<count> Feedback' token, defaulting to '0 Feedback'."""
    match = re.search(r'\d+ Feedback', str(feedback))
    if match is None:
        return '0 Feedback'
    return match.group(0)
# Pull the feedback count out of the misc text and store it as an int column.
df_train['Feedback'] = df_train['Miscellaneous_Info'].apply(get_feedback)
df_train['Feedback'] = df_train['Feedback'].str.replace(' Feedback','')
df_train['Feedback'] = df_train['Feedback'].astype(int)
df_test['Feedback'] = df_test['Miscellaneous_Info'].apply(get_feedback)
df_test['Feedback'] = df_test['Feedback'].str.replace(' Feedback','')
df_test['Feedback'] = df_test['Feedback'].astype(int)
# + _uuid="682c84d5bc7b6a253960fd765e9f382914a8873d"
def get_fee(text):
    """Return the first '₹<amount>' token (commas removed) from *text*, or int 0.

    The int-0 sentinel (rather than '₹0') is intentional: downstream
    `.str.replace` turns those rows into NaN, which the notebook then
    fills with 50.
    """
    without_commas = text.replace(',', '')
    match = re.search(r'₹\d+', without_commas)
    if match is None:
        return 0
    return match.group(0)
# Fee column is mixed str ('₹300') / int (0); the .str accessor turns the
# int rows into NaN, which are then filled with 50 before the numeric cast.
df_train['Misc_Fees'] = df_train['Miscellaneous_Info'].apply(get_fee)
df_train['Misc_Fees'] = df_train['Misc_Fees'].str.replace('₹','')
df_train['Misc_Fees'].fillna(50, inplace=True)
df_train['Misc_Fees'] = df_train['Misc_Fees'].astype(int)
df_test['Misc_Fees'] = df_test['Miscellaneous_Info'].apply(get_fee)
df_test['Misc_Fees'] = df_test['Misc_Fees'].str.replace('₹','')
df_test['Misc_Fees'].fillna(50, inplace=True)
# NOTE(review): train is cast to int but test to float — probably unintentional;
# confirm whether the dtype mismatch matters to the downstream models.
df_test['Misc_Fees'] = df_test['Misc_Fees'].astype(float)
# + _uuid="fb7c9a9280ebb2f27cca7b4097ad5150a932ce39"
def clean_misc(text):
    """Normalize misc info exactly like clean_place: lowercase, alphanumerics only, single spaces."""
    stages = (
        lambda s: str(s).lower(),
        lambda s: re.sub(r'[^a-z0-9]', ' ', s),
        lambda s: re.sub(r'\s+', ' ', s),
    )
    for stage in stages:
        text = stage(text)
    return text
df_train['Miscellaneous_Info'] = df_train['Miscellaneous_Info'].apply(clean_misc)
df_test['Miscellaneous_Info'] = df_test['Miscellaneous_Info'].apply(clean_misc)
# + _uuid="d90e8732b10e9a1f504a2d88c9717ad37da5648b"
# NOTE(review): get_dummies is applied to train and test separately — if a
# Profile level appears in only one frame the columns will diverge; verify.
df_train = pd.get_dummies(df_train, columns=['Profile'])
df_test = pd.get_dummies(df_test, columns=['Profile'])
# + _uuid="43f452451df2630c105190955625c5a74010c258"
df_train.head()
# + [markdown] _uuid="6e46fb0e846de8fb0f2306a963e55994d6fdc4d3"
# #### train test split
# + _uuid="b5f447dd32e2bd76a6a4b79cc2c5da79acf7062e"
X = df_train.drop(labels=['Fees'], axis=1)
y = df_train['Fees'].values
from sklearn.model_selection import train_test_split
# 75/25 hold-out split for model selection.
X_train, X_cv, y_train, y_cv = train_test_split(X, y, test_size=0.25, random_state=1)
# + _uuid="7baf728114e7f6d8353c78e0ce74f1a52413a602"
X_train.shape, y_train.shape, X_cv.shape, y_cv.shape
# + _uuid="0ccdd19532301b5fd8c67ba335c82dd4ba80ee4f"
X_train.columns
# + [markdown] _uuid="3e6279bc7383a4c1fa842dba3b108f26eb9ba576"
# #### build model
# + _uuid="2d9d598c02a0748471640678d18a8062fd0f25f8"
# Split out the three text columns so each can get its own TF-IDF vectorizer.
X_train_qual = X_train['Qualification']
X_cv_qual = X_cv['Qualification']
X_train_place = X_train['Place']
X_cv_place = X_cv['Place']
X_train_misc = X_train['Miscellaneous_Info']
X_cv_misc = X_cv['Miscellaneous_Info']
from sklearn.feature_extraction.text import TfidfVectorizer
# Vectorizers are fit on the training fold only, then applied to CV (no leakage).
tf1 = TfidfVectorizer(ngram_range=(1,2), binary=True, token_pattern=r'\w{3,}')
X_train_qual = tf1.fit_transform(X_train_qual)
X_cv_qual = tf1.transform(X_cv_qual)
tf2 = TfidfVectorizer()
X_train_place = tf2.fit_transform(X_train_place)
X_cv_place = tf2.transform(X_cv_place)
tf3 = TfidfVectorizer(token_pattern=r'\w{1,}', ngram_range=(1,2))
X_train_misc = tf3.fit_transform(X_train_misc)
X_cv_misc = tf3.transform(X_cv_misc)
# + _uuid="2841de8bdeafd4a72b6aa6e776d7a2b788ee7ccd"
# Reshape the numeric columns into (n, 1) so they can be hstacked with the
# sparse TF-IDF matrices.
X_train_exp = np.array(X_train['Experience']).reshape(-1,1)
X_cv_exp = np.array(X_cv['Experience']).reshape(-1,1)
X_train_feedback = np.array(X_train['Feedback']).reshape(-1,1)
X_cv_feedback = np.array(X_cv['Feedback']).reshape(-1,1)
X_train_rating = np.array(X_train['Rating']).reshape(-1,1)
X_cv_rating = np.array(X_cv['Rating']).reshape(-1,1)
# Only 5 of the 6 one-hot Profile columns are kept (one dropped as reference).
cols = ['Profile_Dentist', 'Profile_Dermatologists', 'Profile_ENT Specialist', 'Profile_General Medicine',
        'Profile_Homeopathy']
X_train_prof = X_train[cols]
X_cv_prof = X_cv[cols]
X_train_miscfees = np.array(X_train['Misc_Fees']).reshape(-1,1)
X_cv_miscfees = np.array(X_cv['Misc_Fees']).reshape(-1,1)
# + _uuid="1ca9ce382aa3772118c72e02a59dce1dd7a4ec2e"
from math import sqrt
from sklearn.metrics import mean_squared_log_error, mean_squared_error
# + _uuid="0256f33dfad364b7ac808ca80af251261eb27cf1"
from scipy.sparse import hstack
# Feature set A: includes Misc_Fees but not Feedback (used by XGB/LGBM/Bagging).
merged_train = hstack((X_train_exp, X_train_qual, X_train_prof, X_train_place, X_train_rating, X_train_misc, X_train_miscfees))
merged_cv = hstack((X_cv_exp, X_cv_qual, X_cv_prof, X_cv_place, X_cv_rating, X_cv_misc, X_cv_miscfees))
# + _uuid="6099c6af678aad2d968d870c4e5912d908d3c1a5"
merged_train.shape, merged_cv.shape
# + _uuid="00b8dbdd2af81cf9506c99a07543b2fff4894d4e"
from xgboost import XGBRegressor
# Gradient-boosted trees on feature set A; evaluated with RMSLE on the CV fold.
xgb = XGBRegressor(learning_rate=0.02,
                   gamma=100,
                   max_depth=25,
                   min_child_weight=1,
                   max_delta_step=0,
                   subsample=0.75,
                   colsample_bylevel=0.95,
                   colsample_bytree=0.70,
                   reg_lambda=1)
xgb.fit(merged_train, y_train)
y_pred1 = xgb.predict(merged_cv)
y_pred1 = y_pred1.astype(int)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred1)))
# + _uuid="a8b8a5ac8bc66debd3acdeaa3f9fbcd0eb8092c6"
import lightgbm as lgb
train_data = lgb.Dataset(merged_train, label=y_train)
test_data = lgb.Dataset(merged_cv, label=y_cv)
# DART boosting; hyperparameters are hand-tuned for this dataset.
param = {'objective': 'regression',
         'boosting': 'dart',
         'num_iterations': 152,
         'learning_rate': 0.1,
         'num_leaves': 52,
         'max_depth': 28,
         'min_data_in_leaf': 3,
         'feature_fraction': 0.66,
         'feature_fraction_seed': 10,
         'drop_seed': 4,
         'alpha': 100,
         'max_bin': 7,
         'min_data_in_bin': 45,
         'metric': 'l2_root'
         }
lgbm = lgb.train(params=param,
                 verbose_eval=0,
                 train_set=train_data,
                 valid_sets=[test_data])
y_pred2 = lgbm.predict(merged_cv)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred2)))
# + _uuid="b378e9dd706ac75f6c874b825547541a812c172b"
from sklearn.ensemble import BaggingRegressor
# base_estimator=None => bagged decision trees.
br = BaggingRegressor(base_estimator=None,
                      n_estimators=80,
                      max_samples=1.0,
                      max_features=1.0,
                      bootstrap=True,
                      bootstrap_features=True,
                      oob_score=True,
                      n_jobs=None,
                      random_state=13,
                      verbose=0)
br.fit(merged_train, y_train)
y_pred5 = br.predict(merged_cv)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred5)))
print('RMSE:', sqrt(mean_squared_error(y_cv, y_pred5)))
# 0.58019310689049
# + _uuid="3cfea32f1213edd391ef45cd4aa88c1e9b44a817"
from scipy.sparse import hstack
# Feature set B: includes Feedback but not Misc_Fees (used by GB/RF).
merged_train = hstack((X_train_exp, X_train_feedback, X_train_qual, X_train_prof, X_train_place, X_train_rating, X_train_misc))
merged_cv = hstack((X_cv_exp, X_cv_feedback, X_cv_qual, X_cv_prof, X_cv_place, X_cv_rating, X_cv_misc))
# + _uuid="adc8c486c690e97129d2a2305945fab908d362d1"
from sklearn.ensemble import GradientBoostingRegressor
# loss='lad' => least absolute deviation (median regression).
gb = GradientBoostingRegressor(loss='lad',
                               learning_rate=0.2,
                               random_state=10,
                               n_estimators=92,
                               max_depth=11,
                               subsample=1.0,
                               min_samples_split=40,
                               min_samples_leaf=1,
                               max_features='auto')
gb.fit(merged_train, y_train)
y_pred3 = gb.predict(merged_cv)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred3)))
# + _uuid="c7eade7dd37a3073b8a418d454544a01abffc345"
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=29,
                           criterion='mse',
                           max_depth=58,
                           min_samples_split=5,
                           min_samples_leaf=2,
                           min_weight_fraction_leaf=0.0,
                           max_features='auto',
                           max_leaf_nodes=None,
                           min_impurity_decrease=0.20,
                           bootstrap=True,
                           oob_score=True,
                           n_jobs=-1,
                           random_state=11)
rf.fit(merged_train, y_train)
# NOTE(review): y_pred4 (random forest) is scored here but never used in the
# blends below — confirm whether that is intentional.
y_pred4 = rf.predict(merged_cv)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred4)))
# + _uuid="8100df6e653bf2b9eb96db3e5b849c7d1c91490e"
# Weighted blends of the model predictions (weights hand-tuned on the CV fold).
y_predx = y_pred1*.4 + y_pred2*0.5 + y_pred3*0.1
y_predx = y_predx.astype(int)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_predx)))
# + _uuid="fbd551f29eb0ebd616dd33560a266956ac3b2fd0"
y_predy = y_pred1*0.40 + y_pred2*0.45 + y_pred3*0.10 + y_pred5*0.05
y_predy = y_predy.astype(int)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_predy)))
# + _uuid="017852befc54cd6e56013bfda34b306ac7b5bbcc"
# Final blend of the two blends.
y_pred = y_predx*.98 + y_predy*0.02
y_pred = y_pred.astype(int)
print('RMSLE:', sqrt(mean_squared_log_error(y_cv, y_pred)))
# + [markdown] _uuid="6d66afc23d1c8d4ba777d138b012973590be53fa"
# #### Now, predict on test set
# + _uuid="213ebe92adce1bf1fb9dc1e729cbaa701c060f56"
# Refit everything on ALL training data before predicting the real test set.
X_train = df_train.drop(labels='Fees', axis=1)
y_train = df_train['Fees'].values
X_test = df_test
# + _uuid="6ed685415eb99f411f0c1074fcbdbfc5843c8ee9"
X_train_qual = X_train['Qualification']
X_test_qual = X_test['Qualification']
X_train_place = X_train['Place']
X_test_place = X_test['Place']
X_train_misc = X_train['Miscellaneous_Info']
X_test_misc = X_test['Miscellaneous_Info']
from sklearn.feature_extraction.text import TfidfVectorizer
# Same vectorizer settings as the CV stage, refit on the full training data.
tf1 = TfidfVectorizer(ngram_range=(1,2), binary=True, token_pattern=r'\w{3,}')
X_train_qual = tf1.fit_transform(X_train_qual)
X_test_qual = tf1.transform(X_test_qual)
tf3 = TfidfVectorizer()
X_train_place = tf3.fit_transform(X_train_place)
X_test_place = tf3.transform(X_test_place)
tf4 = TfidfVectorizer(token_pattern=r'\w{1,}', ngram_range=(1,2))
X_train_misc = tf4.fit_transform(X_train_misc)
X_test_misc = tf4.transform(X_test_misc)
# + _uuid="c243b5fb84386e4abf827fb5cf6e39546bdd75f6"
X_train_exp = np.array(X_train['Experience']).reshape(-1,1)
X_test_exp = np.array(X_test['Experience']).reshape(-1,1)
X_train_feedback = np.array(X_train['Feedback']).reshape(-1,1)
X_test_feedback = np.array(X_test['Feedback']).reshape(-1,1)
X_train_rating = np.array(X_train['Rating']).reshape(-1,1)
X_test_rating = np.array(X_test['Rating']).reshape(-1,1)
cols = ['Profile_Dentist', 'Profile_Dermatologists', 'Profile_ENT Specialist', 'Profile_General Medicine',
        'Profile_Homeopathy']
X_train_prof = X_train[cols]
X_test_prof = X_test[cols]
X_train_miscfees = np.array(X_train['Misc_Fees']).reshape(-1,1)
X_test_miscfees = np.array(X_test['Misc_Fees']).reshape(-1,1)
# + _uuid="965b4ab6f029f95f085106cf19fd2c132b5470ef"
from scipy.sparse import hstack
# Feature set A (with Misc_Fees) for XGB/LGBM/Bagging on the real test set.
merged_train = hstack((X_train_exp, X_train_qual, X_train_prof, X_train_place, X_train_rating, X_train_misc, X_train_miscfees))
merged_test = hstack((X_test_exp, X_test_qual, X_test_prof, X_test_place, X_test_rating, X_test_misc, X_test_miscfees))
# + _uuid="90aa97035ad63f20b07a95c7c5d0d52d90609384"
merged_train.shape, merged_test.shape
# + _uuid="7ab74fcfed4463bb08360ccd336b552ba22af900"
from xgboost import XGBRegressor
# Same hyperparameters as the CV stage.
xgb = XGBRegressor(learning_rate=0.02,
                   gamma=100,
                   max_depth=25,
                   min_child_weight=1,
                   max_delta_step=0,
                   subsample=0.75,
                   colsample_bylevel=0.95,
                   colsample_bytree=0.70,
                   reg_lambda=1)
xgb.fit(merged_train, y_train)
y_pred1 = xgb.predict(merged_test)
# + _uuid="2ed8adacf9a32fc93c078253e5b445923400fca8"
import lightgbm as lgb
train_data = lgb.Dataset(merged_train, label=y_train)
# NOTE(review): num_iterations/max_depth/feature_fraction differ slightly from
# the CV-stage values (152/28/0.66 there vs 137/33/0.54 here) — confirm intended.
param = {'objective': 'regression',
         'boosting': 'dart',
         'num_iterations': 137,
         'learning_rate': 0.1,
         'num_leaves': 52,
         'max_depth': 33,
         'min_data_in_leaf': 3,
         'feature_fraction': 0.54,
         'feature_fraction_seed': 50,
         'alpha': 100,
         'max_bin': 7,
         'min_data_in_bin': 45,
         'metric': 'l2_root'
         }
lgbm = lgb.train(params=param,
                 train_set=train_data)
y_pred2 = lgbm.predict(merged_test)
# + _uuid="4cb7b732a39c8f572e9480f964154e79b3831456"
from sklearn.ensemble import BaggingRegressor
br = BaggingRegressor(base_estimator=None,
                      n_estimators=80,
                      max_samples=1.0,
                      max_features=1.0,
                      bootstrap=True,
                      bootstrap_features=True,
                      oob_score=True,
                      n_jobs=None,
                      random_state=13, #4
                      verbose=0)
br.fit(merged_train, y_train)
y_pred5 = br.predict(merged_test)
# + _uuid="fa4c57a57a65f513eca98aeb8dd8af7e25ef5e4a"
from scipy.sparse import hstack
# Feature set B (with Feedback) for GB/RF on the real test set.
merged_train = hstack((X_train_exp, X_train_feedback, X_train_qual, X_train_prof, X_train_place, X_train_rating, X_train_misc))
merged_test = hstack((X_test_exp, X_test_feedback, X_test_qual, X_test_prof, X_test_place, X_test_rating, X_test_misc))
# + _uuid="fd85de8d77658788c05983c2189d7eacf8d45797"
merged_train.shape, merged_test.shape
# + _uuid="28d4b284ef0b0afdc2fafcb085e25d3aea88ea9a"
from sklearn.ensemble import GradientBoostingRegressor
gb = GradientBoostingRegressor(loss='lad',
                               learning_rate=0.2, #0.2
                               random_state=10,
                               n_estimators=92,
                               max_depth=11, #11,
                               subsample=1.0,
                               min_samples_split=40,
                               min_samples_leaf=1,
                               max_features='auto')
gb.fit(merged_train, y_train)
y_pred3 = gb.predict(merged_test)
# + _uuid="6af50aee202aa2ea061a11e29d290e109a4a1d7a"
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=29, #25, 29
                           criterion='mse',
                           max_depth=58, #55, 58
                           min_samples_split=5,
                           min_samples_leaf=2,
                           min_weight_fraction_leaf=0.0,
                           max_features='auto',
                           max_leaf_nodes=None,
                           min_impurity_decrease=0.20, #0.20,
                           bootstrap=True,
                           oob_score=True,
                           n_jobs=-1,
                           random_state=11)
rf.fit(merged_train, y_train)
# NOTE(review): y_pred4 is unused in the final blends, mirroring the CV stage.
y_pred4 = rf.predict(merged_test)
# + _uuid="88d3baa1a0d6c9ba6485ed21ecd61f377a3896f3"
y_pred1.shape, y_pred2.shape, y_pred3.shape, y_pred4.shape, y_pred5.shape
# + _uuid="da45808769fed3c3957c266f6601783f2aab56a6"
# Same blend weights as selected on the CV fold.
y_predx = y_pred1*.4 + y_pred2*0.5 + y_pred3*0.10
y_predx = y_predx.astype(int)
# + _uuid="34a4bb4167de6f15e22ebe104d91e1306b165fde"
y_predy = y_pred1*0.40 + y_pred2*0.45 + y_pred3*0.10 + y_pred5*0.05
y_predy = y_predy.astype(int)
# + _uuid="4e254731e0991d11967b9b92ef898db75d6820cb"
y_pred = y_predx*.98 + y_predy*0.02
y_pred = y_pred.astype(int)
# + [markdown] _uuid="fc7f8df4c8acd61fb66d38fd3d16b44127a56190"
# #### write predictions to output file
# + _uuid="fe88dfef696e96f12650f0e53bc748fa8dcaf43e"
# Write the submission workbook.
df_sub = pd.DataFrame(data=y_pred, columns=['Fees'])
writer = pd.ExcelWriter('output74.xlsx', engine='xlsxwriter')
df_sub.to_excel(writer,sheet_name='Sheet1', index=False)
writer.save()
| Predict Doctor Consultation Fee.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#本章需导入的模块
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import warnings
warnings.filterwarnings(action = 'ignore')
# %matplotlib inline
plt.rcParams['font.sans-serif']=['SimHei'] #解决中文显示乱码问题
plt.rcParams['axes.unicode_minus']=False
from sklearn.datasets import make_classification,make_circles,make_regression
from sklearn.model_selection import train_test_split
import sklearn.neural_network as net
import sklearn.linear_model as LM
from scipy.stats import multivariate_normal
from sklearn.metrics import r2_score,mean_squared_error
from sklearn import svm
N=100
# Draw N two-feature samples for a binary classification task: two informative
# features, no redundant ones, one cluster per class, classes well separated.
X,Y=make_classification(n_samples=N,n_features=2,n_redundant=0,n_informative=2,class_sep=1,random_state=1,n_clusters_per_class=1)
# Hold-out split: 85% of the samples for training, 15% for testing.
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,train_size=0.85, random_state=123)
markers=['^','o']
# Training observations: filled markers, one marker shape per class.
for k,m in zip([1,0],markers):
    plt.scatter(X_train[Y_train==k,0],X_train[Y_train==k,1],marker=m,s=40)
# Test observations: unfilled markers with green edges.
# BUG FIX: c='' was deprecated and later rejected by matplotlib 3.x;
# facecolors='none' is the documented way to draw unfilled markers.
for k,m in zip([1,0],markers):
    plt.scatter(X_test[Y_test==k,0],X_test[Y_test==k,1],marker=m,s=40,facecolors='none',edgecolors='g')
plt.title("100个样本观测点的分布")
plt.xlabel("X1")
plt.ylabel("X2")
plt.grid(True,linestyle='-.')
plt.show()
# Code notes:
# (1) Lines 1-2: generate a random sample of N=100 observations for binary
#     classification with two input variables; the two classes do not overlap.
# (2) Line 3: hold-out split of the data into 85% training / 15% test sets.
# (3) Remaining lines: plot training and test observations with different
#     marker shapes and colours.
#
# +
# Build a 300x300 grid spanning the training data's range; these 90000 points
# are classified to paint the decision regions of each trained network.
X1,X2= np.meshgrid(np.linspace(X_train[:,0].min(),X_train[:,0].max(),300),np.linspace(X_train[:,1].min(),X_train[:,1].max(),300))
X0=np.hstack((X1.reshape(len(X1)*len(X2),1),X2.reshape(len(X1)*len(X2),1)))
fig,axes=plt.subplots(nrows=2,ncols=2,figsize=(12,8))
# Train the same single-hidden-layer MLP four times with different random
# seeds and show how the learned decision boundary (and test error) varies.
for seed,H,L in [(123,0,0),(3000,0,1),(0,1,0),(20,1,1)]:
    NeuNet=net.MLPClassifier(activation='logistic',random_state=seed,hidden_layer_sizes=(10,),max_iter=200)
    NeuNet.fit(X_train,Y_train)
    #NeuNet.out_activation_  # activation function of the output node
    Y0=NeuNet.predict(X0)
    # Paint the two predicted regions of the grid.
    axes[H,L].scatter(X0[np.where(Y0==1),0],X0[np.where(Y0==1),1],c='lightgray')
    axes[H,L].scatter(X0[np.where(Y0==0),0],X0[np.where(Y0==0),1],c='mistyrose')
    for k,m in [(1,'^'),(0,'o')]:
        axes[H,L].scatter(X_train[Y_train==k,0],X_train[Y_train==k,1],marker=m,s=40)
        # BUG FIX: c='' was deprecated and later rejected by matplotlib 3.x;
        # facecolors='none' is the documented way to draw unfilled markers.
        axes[H,L].scatter(X_test[Y_test==k,0],X_test[Y_test==k,1],marker=m,s=40,facecolors='none',edgecolors='g')
    axes[H,L].grid(True,linestyle='-.')
    axes[H,L].set_title("分类平面(随机数种子=%d,测试误差=%.2f)"%(seed,1-NeuNet.score(X_test,Y_test)))
# -
# 代码说明:
# (1)第1,2行:为绘制分类边界准备数据:为在两个输入变量取值范围内的90000个样本观测点。
# (2)第4至16行:利用for循环进行四次感知机网络的训练。
| chapter8-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Suite of scripts for analyzing MSAs using Singular Value Decomposition - Homeodomain
#
# ### Version 0, updated 02-09-2022
#
# #### Written by <NAME>, <NAME>, and <NAME>
# This file walks through a simple SVD analysis of HD and does not include all of the plots from the paper. It can be adapted to any protein MSA by changing the parameters and filenames in cell 2, along with potentially the number of clusters in cell 12.
# +
import os
import cv2
import imageio
import numpy as np
import pandas as pd
import scipy as sp
import Bio
from Bio import SeqIO
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
# -
# ## Parameters & Filenames
# +
# Filename of the gap-stripped multiple sequence alignment (FASTA format).
ALIGNMENT = 'HD_aligned_gapStrip.txt'
# Protein name, used as a prefix for every output figure/CSV/movie filename.
PROTEIN_NAME = 'HD'
# Set to 1 to generate and save rotating 3-D plots as .mp4 movies, 0 to skip.
MOVIES = 0
# Matplotlib styling shared across all figures.
plt.style.use('svd.mplstyle.txt')
# -
# ## Import Multiple Sequence Alignment
# +
# Parse the alignment and collect sequence IDs and residue strings.
# FIX: open the FASTA file via a context manager so the handle is closed
# (the original passed an open() result straight to SeqIO.parse and never
# closed it), and drop the unused `count` variable.
names = []
sequences = []
with open(ALIGNMENT) as handle:
    for fasta in SeqIO.parse(handle, 'fasta'):
        name, seq = fasta.id, str(fasta.seq)
        names.append(name)
        # Unknown residues 'X' are treated the same as alignment gaps.
        seq = seq.replace('X', '-')
        sequences.append(seq)
num_seqs = len(sequences)
# -
# ## One-Hot Encoding Function for Sequences
# Function ohe takes protein sequence string and returns a one-hot encoded version of that sequence
def ohe(list_residues):
    """One-hot encode a protein sequence string.

    Each residue becomes a length-20 binary vector (one slot per standard
    amino acid, in the order ACDEFGHIKLMNPQRSTVWY); the gap '-' and the
    ambiguous codes 'Z' and 'B' map to the all-zero vector.  The per-residue
    vectors are concatenated into one flat list of ints.
    """
    amino_acids = 'ACDEFGHIKLMNPQRSTVWY'
    # Residue -> 20-element 0/1 list; gap/ambiguous symbols get all zeros.
    codes = {aa: [1 if j == i else 0 for j in range(len(amino_acids))]
             for i, aa in enumerate(amino_acids)}
    for symbol in '-ZB':
        codes[symbol] = [0] * len(amino_acids)
    encoded = []
    for residue in list_residues:
        encoded.extend(codes[residue])
    return encoded
# +
# Encode every aligned sequence and stack the encodings into the binary
# F-matrix (rows = sequences, columns = position x amino-acid indicators).
ohe_list = [ohe(seq) for seq in sequences]
F = np.array(ohe_list)
print('F-matrix generated.')
# -
# ## Perform SVD on F-matrix
# +
# Singular value decomposition of the one-hot F-matrix: F = U @ diag(S) @ VT.
U, S, VT = np.linalg.svd(F)
V = VT.transpose()
print('SVD has been performed.')
# Placeholder array with one entry per sequence (not used in the code below).
len_array = np.zeros(len(sequences))
# -
# ### Run this if you want the $v_i^{(1)}$ and the $\sigma_1 u_i^{(1)}$ elements to be positive.
# SVD is sign-ambiguous: if the first right-singular vector sums negative,
# flip U and V together so the v_i^(1) and sigma_1 u_i^(1) values come out
# positive (the product U S VT is unchanged).
if np.sum(V[:, 0]) < 0:
    U = -U
    V = -V
# ## Bar plots of singular values
#
# ### Bar plot of $\sigma_i$ and cumulative $\sigma_i$
# +
sum_of_sigmas = np.sum(S)  # total of all singular values (not used below)
cumsum_of_sigmas = np.cumsum(S)
# Integer tick positions 1..20 for the first twenty singular values.
xticks = []
for num in range(1, 21):
    xticks.append(num)
mpl.rc('font', family='Arial', size=14)
labelfont = {'family': 'Arial', 'size': 24}
fig1 = plt.figure(figsize=(12, 4.5))
# Left panel: the 20 largest singular values.
plt.subplot(1, 2, 1)
plt.bar(np.arange(1, 21), height = S[:20], color='black')
plt.xticks(xticks)
plt.xlabel('$i$', fontsize = 18)
plt.ylabel('$\sigma_i$', fontsize = 18)
# Right panel: their cumulative sum.
plt.subplot(1, 2, 2)
plt.bar(np.arange(1, 21), height = cumsum_of_sigmas[:20], color='black')
plt.xticks(xticks)
plt.xlabel('$i$', fontsize = 18)
plt.ylabel('Cumulative $\sigma_i$', fontsize = 18)
plt.tight_layout()
plt.savefig(f'figures/{PROTEIN_NAME}_sigma_barchart.pdf')
# -
# #### 3D plots of sequences along the first three singular axes
# +
# 3-D scatter of all sequences projected onto the first three singular axes
# (coordinates sigma_k * u_i^(k)).
fig4 = plt.figure(1, figsize = (8, 7))
ax = fig4.add_subplot(projection='3d')
ax.set_xlabel(f"$\sigma_{{{1}}} u_i^{{({1})}}$", fontsize=12)
ax.set_ylabel(f"$\sigma_{{{2}}} u_i^{{({2})}}$", fontsize=12)
ax.set_zlabel(f"$\sigma_{{{3}}} u_i^{{({3})}}$", fontsize=12)
ax.scatter(S[0]*U[:,0], S[1]*U[:,1], S[2]*U[:,2], c = 'black', s = 5)
# Get rid of colored axes planes
# First remove fill
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Now set color to white (or whatever is "invisible")
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.zaxis.pane.set_edgecolor('black')
ax.view_init(30,300)
# -
# ## K-Means Clustering
# ### Elbow plot to Choose Number of Clusters
#
# The WCSS is the sum of squared distance between each point in a cluster and the cluster centroid. We use the elbow method to minimize the WCSS with as few clusters as possible. This is typically found at the "elbow" of the graph.
# +
mpl.rc('font', family='Arial', size=14)
labelfont = {'family': 'Arial', 'size': 24}
wcss = []  # within-cluster sum of squares for each candidate k
# Sequence coordinates along the singular axes: column j of SU_df is S[j]*U[:,j].
SU_df = pd.DataFrame(S*U[:,:len(S)])
# Fit k-means for k = 1..10 on the first three SVD coordinates and record WCSS.
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    kmeans.fit(SU_df.iloc[:, 0:3])
    wcss.append(kmeans.inertia_)
plt.figure(figsize=(6,4))
plt.plot(range(1, 11), wcss,'o',color='black')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.savefig(f'figures/{PROTEIN_NAME}_elbowplot.pdf')
# -
# ## Set the number of clusters and define colors
#
# +
# Number of k-means clusters, chosen from the elbow plot above.
CLUSTERS = 4
# Fixed cluster-index -> plot-colour mapping (supports up to 6 clusters).
cluster_ID_colormap = {0 : 'red', 1 : 'blue', 2: 'orange', 3: 'green', 4: 'black', 5: 'violet'}
# -
# ## Perform K-Means clustering
# Cluster the sequences on their first three SVD coordinates.
kmeans = KMeans(n_clusters = CLUSTERS, init ='k-means++',
max_iter = 300, n_init = 10, random_state = 0)
cluster_ID = kmeans.fit_predict(SU_df.iloc[:, 0:3])
# Per-sequence plotting colour derived from its cluster label.
cluster_color = [cluster_ID_colormap[k] for k in cluster_ID]
# ## 2D plots of clustered sequences along the first three singular axes
# +
fig2 = plt.figure(figsize=(12,4))
# One panel per consecutive pair of singular axes: (1,2), (2,3), (3,4).
for i in range(3):
    plt.subplot(1, 3, i+1)
    # Hand-tuned axis limits; the first axis uses a different x-range.
    if i == 0:
        plt.xlim(-0, 7)
    else:
        plt.xlim(-3, 6)
    plt.ylim(-3, 4)
    plt.scatter(S[i]*U[:,i], S[i+1]*U[:,i+1], s = 2, c = cluster_color)
    plt.xlabel(f'$\sigma_{i+1} u_i^{{({i+1})}}$')
    plt.ylabel(f'$\sigma_{i+2} u_i^{{({i+2})}}$')
plt.tight_layout()
plt.savefig(f'figures/{PROTEIN_NAME}_sigmaU_2dplots_cluster.pdf')
# -
# ## 3D plot of sequences along the first three singular axes
# +
# 3-D scatter of the sequences along the first three singular axes, coloured
# by their k-means cluster assignment.
fig6 = plt.figure(1, figsize = (8, 7))
ax = fig6.add_subplot(projection='3d')
ax.set_xlabel(f"$\sigma_{{{1}}} u_i^{{({1})}}$", fontsize=12)
ax.set_ylabel(f"$\sigma_{{{2}}} u_i^{{({2})}}$", fontsize=12)
ax.set_zlabel(f"$\sigma_{{{3}}} u_i^{{({3})}}$", fontsize=12)
ax.scatter(SU_df[0], SU_df[1], SU_df[2], c = cluster_color, s = 5)
# Get rid of colored axes planes
# First remove fill
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Now set color to white (or whatever is "invisible")
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.zaxis.pane.set_edgecolor('black')
ax.view_init(30,300)
plt.tight_layout()
plt.savefig(f'figures/{PROTEIN_NAME}_sigmaU_3dplot_cluster.pdf')
# Create movie of the 3D plot
# Renders one frame per degree of azimuth rotation, stitches the frames into
# an .mp4, and deletes each temporary .png after use.
if MOVIES == 1:
    imagedata = []
    for ii in range(0,360,1):
        # create temporary frame for movie
        ax.view_init(30, azim=ii)
        plt.savefig('movie%d.png' % ii)
        # save temporary frame for movie
        data = imageio.imread('movie{}.png'.format(ii))
        data = cv2.resize(data, (960, 960))
        imagedata.append(data)
        # remove temporary frame from directory
        os.remove('movie{}.png'.format(ii))
    imageio.mimwrite(f'figures/{PROTEIN_NAME}_sigmaU_3dplot_cluster.mp4', imagedata, format= '.mp4', fps = 20)
# -
# ## Generate a dictionary of the sequence names (e.g., A0A1J4JHK3/1-112) in each cluster (keyed by color), used for taxonomy analysis
# +
colors = list(cluster_ID_colormap.values())
# One list of sequence names per cluster colour actually in use.
names_in_color_dict = {c: [] for c in colors[:cluster_ID.max()+1]}
for j in range(len(cluster_ID)):
    label = cluster_ID_colormap[cluster_ID[j]]
    names_in_color_dict[label].append(names[j])
# Clusters have different sizes, so wrap each name list in a Series; the
# DataFrame constructor then pads shorter columns with NaN.
df = pd.DataFrame(dict([(k,pd.Series(v)) for k,v in names_in_color_dict.items()]))
df.to_csv(f'figures/{PROTEIN_NAME}_{CLUSTERS}_clusters.csv')
# -
# ## Generating F-matrices for individual clusters and differences of in-cluster vs. out-of-cluster residue frequencies
# The *F_cluster_dict* dictionary below separates the F matrix into an F-matrix for each cluster (e.g., *F_cluster_1*). There is also a residue count for each cluster (e.g., *F_cluster_1_sum*) and a fraction (e.g., *F_cluster_1_fraction*), where the residue count is divided by the number of sequences in each cluster.
#
# In addition, this dictionary contains the same information for sequences *not* in each cluster.
# +
F_cluster_dict = {}
minus_not = {}
# For each cluster, store its F sub-matrix, per-column residue counts, the
# same quantities for the out-of-cluster sequences, and the difference of
# in-cluster vs out-of-cluster residue frequencies.
# (FIX: dropped the original placeholder np.array([])/np.zeros assignments --
# every entry was immediately overwritten, so they were dead code -- and use
# ndarray.sum(axis=0) instead of the slower builtin sum over rows.)
for cluster in range(CLUSTERS):
    # Row indices of the sequences inside / outside this cluster.
    is_cluster = np.where(cluster_ID == cluster)[0]
    is_not_cluster = np.where(cluster_ID != cluster)[0]
    F_cluster_dict[f'F_cluster_{cluster}'] = F[is_cluster]
    F_cluster_dict[f'F_cluster_{cluster}_sum'] = F[is_cluster].sum(axis=0)
    F_cluster_dict[f'F_not_cluster_{cluster}'] = F[is_not_cluster]
    F_cluster_dict[f'F_not_cluster_{cluster}_sum'] = F[is_not_cluster].sum(axis=0)
    # save frequency of residues in sequences
    F_cluster_dict[f'F_cluster_{cluster}_fraction'] = \
    F_cluster_dict[f'F_cluster_{cluster}_sum']/len(F_cluster_dict[f'F_cluster_{cluster}'])
    F_cluster_dict[f'F_not_cluster_{cluster}_fraction'] = \
    F_cluster_dict[f'F_not_cluster_{cluster}_sum']/len(F_cluster_dict[f'F_not_cluster_{cluster}'])
    # Per-column difference: in-cluster frequency minus out-of-cluster frequency.
    minus_not[f'{cluster}_minus_not_{cluster}'] = \
    F_cluster_dict[f'F_cluster_{cluster}_fraction'] - F_cluster_dict[f'F_not_cluster_{cluster}_fraction']
# -
# ## Create lists of residues where in-cluster frequency exceeds out-of-cluster frequency by a specified threshold
# +
threshold = 0.4
enriched = {}
depleted = {}
not_enriched = []
for minus, c in zip(minus_not, range(CLUSTERS)):
    # Columns whose in-cluster frequency exceeds the out-of-cluster frequency
    # by more than `threshold` are "enriched" for cluster c.
    enriched[f'{c}_enriched'] = np.where(minus_not[minus] > threshold)[0]
    # NOTE(review): this makes "depleted" the complement of "enriched"
    # (difference < threshold), not symmetric depletion (< -threshold) --
    # confirm this is the intended definition.
    depleted[f'{c}_depleted'] = np.where(minus_not[minus] < threshold)[0]
all_depleted = [v for k,v in depleted.items()]
# NOTE(review): `all_depleted == CLUSTERS` compares a list with an int and is
# always False, so this indexes element 0 -- `not_enriched` ends up being
# cluster 0's depleted set rather than the residues depleted in every
# cluster. Verify the intended behaviour.
not_enriched = all_depleted[all_depleted == CLUSTERS]
for c in range(CLUSTERS):
    num = len(enriched[f'{c}_enriched'])
    print(f'Number of residues enriched in cluster {colors[c]} is {num}.')
print(f'Number of residues not enriched in any cluster is {len(not_enriched)}.')
# -
# ## Collect $v_i^{(k)}$ values for enriched residues in each cluster.
# +
enriched_V = {}
# Rows of V (one per position x amino-acid column of F) for the residues
# enriched in each cluster, plus the rows for the not-enriched residues.
# (FIX: removed the original np.zeros placeholder assignments -- each was
# immediately overwritten -- and the `V_len` helper they alone used.)
for c in range(CLUSTERS):
    enriched_V[f'{c}_enriched_V'] = V[enriched[f'{c}_enriched'], :]
not_enriched_V = V[not_enriched, :]
# -
# ### Plot $v_i^{(k)}$ values for enriched and not-enriched residues.
# #### 3D plot of cluster-enriched $v_i^{(k)}$ values along the first three singular axes
# +
# 3-D scatter of V rows along the first three singular axes: black points are
# residues not enriched in any cluster, coloured points are residues enriched
# in the corresponding cluster.
fig = plt.figure(1, figsize = (8, 7))
ax = fig.add_subplot(projection='3d')
ax.set_xlabel(f"$v_i^{{({1})}}$", fontsize=12)
ax.set_ylabel(f"$v_i^{{({2})}}$", fontsize=12)
ax.set_zlabel(f"$v_i^{{({3})}}$", fontsize=12)
ax.scatter(not_enriched_V[:, 0], not_enriched_V[:, 1], not_enriched_V[:, 2], c = 'k', s = 18)
for c in range(CLUSTERS):
    ax.scatter(enriched_V[f'{c}_enriched_V'][:, 0],
               enriched_V[f'{c}_enriched_V'][:, 1],
               enriched_V[f'{c}_enriched_V'][:, 2], c = colors[c], s = 30)
ax.view_init(30,300)
plt.tight_layout()
plt.savefig(f'figures/{PROTEIN_NAME}_V_enriched_3dplot.pdf')
# Create movie of the 3D plot
# One frame per degree of azimuth; frames are stitched into an .mp4 and the
# temporary .png files removed as they are consumed.
if MOVIES == 1:
    imagedata = []
    for ii in range(0,360,1):
        # create temporary frame for movie
        ax.view_init(30, azim=ii)
        plt.savefig('movie%d.png' % ii)
        # save temporary frame for movie
        data = imageio.imread('movie{}.png'.format(ii))
        data = cv2.resize(data, (960, 960))
        imagedata.append(data)
        # remove temporary frame from directory
        os.remove('movie{}.png'.format(ii))
    imageio.mimwrite(f'figures/{PROTEIN_NAME}_V_enriched_3dplot.mp4', imagedata, format= '.mp4', fps = 20)
# -
# #### 2D plot of cluster-enriched $v_i^{(k)}$ values along the first four singular axes
# +
# 2-D projections of the V rows onto consecutive pairs of singular axes,
# coloured the same way as the 3-D plot above.
fig = plt.figure(figsize=(12,4))
for i in range(3):
    plt.subplot(1, 3, i+1)
    plt.scatter(not_enriched_V[:, i], not_enriched_V[:, i+1], c = 'k', s = 12)
    for c in range(CLUSTERS):
        plt.scatter(enriched_V[f'{c}_enriched_V'][:, i],
                    enriched_V[f'{c}_enriched_V'][:, i+1], c = colors[c], s = 20)
    plt.xlabel(f"$v_i^{{({i+1})}}$")
    plt.ylabel(f"$v_i^{{({i+2})}}$")
plt.tight_layout()
plt.savefig(f'figures/{PROTEIN_NAME}_V_enriched_2dplots.pdf')
# -
| Homeodomain/HD_SVD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit (conda)
# language: python
# name: python37664bitconda1496054765594920a6930834e868f3ab
# ---
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# ## Import necessary dependencies
# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2
from PIL import Image
from collections import Counter
import matplotlib.pyplot as plt
import sqlite3
import re
import nltk
nltk.download('stopwords')
nltk.download('gutenberg')
nltk.download('punkt')
from keras.preprocessing import text
from keras.utils import np_utils
from keras.preprocessing import sequence
import pydot
pd.options.display.max_colwidth = 200
# %matplotlib inline
# -
# ## Load in the data from the database
# Connect to the local SQLite database and pull the prepared tables.
dbconn = sqlite3.connect('./data/newsclassifier.db')
train_data_df = pd.read_sql_query('SELECT * FROM train_data_sample', dbconn)
headline_bagofwords_df = pd.read_sql_query('SELECT * FROM headline_bagofwords', dbconn)
# Read-only access, so commit is a no-op; close releases the connection.
dbconn.commit()
dbconn.close()
# ### Check the if the data was loaded correctly
# +
train_data_df.head()
# +
headline_bagofwords_df.head()
# +
# The SQL round-trip stored the original index as an 'index' column; drop it
# from both frames and re-inspect.
train_data_df.drop('index', axis=1, inplace=True)
train_data_df.head()
# +
headline_bagofwords_df.drop('index', axis=1, inplace=True)
headline_bagofwords_df.head()
# -
# ### We have bag of words already, let's make a Bag of N-Grams
# +
# Use countvectorizer to get a word vector
# Bigrams and trigrams of alphabetic tokens (>= 2 letters), with English stop
# words removed; min_df=2 drops n-grams seen in fewer than two headlines.
cv = CountVectorizer(min_df = 2, lowercase = True, token_pattern=r'(?u)\b[A-Za-z]{2,}\b',
                     strip_accents = 'ascii', ngram_range = (2, 3),
                     stop_words = 'english')
cv_matrix = cv.fit_transform(train_data_df.headline_cleaned).toarray()
# below is if wanted to define a specific category for the data.
# cv_matrix = cv.fit_transform(train_data_df[train_data_df.category == 1].headline_cleaned).toarray()
# get all unique words in the corpus
vocab = cv.get_feature_names()
# produce a dataframe including the feature names
headline_bagofngrams_df = pd.DataFrame(cv_matrix, columns=vocab)
# -
# ### Make sure we got the dataframe output for the Bag of N-Grams
headline_bagofngrams_df.head()
# ### Let's explore the data we got through plots and tables
# +
# Total occurrences of each n-gram across all headlines.
# (PERF FIX: a single vectorised column sum replaces the original per-column
# Python loop, which re-scanned the DataFrame once per vocabulary entry.)
word_count_dict = {word: int(total) for word, total in headline_bagofngrams_df.sum(axis=0).items()}
counter = Counter(word_count_dict)
# Bar chart of the 20 most frequent n-grams.
freq_df = pd.DataFrame.from_records(counter.most_common(20),
                                    columns=['Top 20 words', 'Frequency'])
freq_df.plot(kind='bar', x='Top 20 words');
# -
# ## TF/IDF
# ### Unigram TF/IDF
# +
# Unigram TF-IDF over alphabetic tokens of length >= 2, stop words removed;
# sublinear_tf applies 1 + log(tf) damping.
tfidf_vect = TfidfVectorizer(sublinear_tf = True, min_df = 2, lowercase = True,
                             strip_accents = 'ascii', ngram_range = (1, 1),
                             stop_words = 'english', use_idf = True, token_pattern=r'(?u)\b[A-Za-z]{2,}\b')
tfidf_unigram = tfidf_vect.fit_transform(train_data_df.headline_cleaned).toarray()
# get all unique words in the corpus
vocab = tfidf_vect.get_feature_names()
# Rounded to 2 decimals purely for display.
tfidf_unigram = pd.DataFrame(np.round(tfidf_unigram, 2), columns = vocab)
tfidf_unigram.head()
# -
# ### N-Gram TF/IDF
# +
# Bigram/trigram TF-IDF with the same token pattern and stop-word settings as
# the unigram version above.
tfidf_vect = TfidfVectorizer(sublinear_tf = True, min_df = 2, lowercase = True,
                             strip_accents = 'ascii', ngram_range = (2, 3),
                             stop_words = 'english', use_idf = True, token_pattern=r'(?u)\b[A-Za-z]{2,}\b')
tfidf_ngram = tfidf_vect.fit_transform(train_data_df.headline_cleaned).toarray()
# get all unique words in the corpus
vocab = tfidf_vect.get_feature_names()
# Rounded to 2 decimals purely for display.
tfidf_ngram = pd.DataFrame(np.round(tfidf_ngram, 2), columns = vocab)
tfidf_ngram.head()
# -
# ### Character TF/IDF
# +
# Character-level TF-IDF over 2- and 3-character n-grams.
# NOTE(review): with analyzer='char', scikit-learn ignores stop_words and
# token_pattern (they only apply to word analyzers) -- confirm intent.
tfidf_vect = TfidfVectorizer(analyzer = 'char', sublinear_tf = True, min_df = 2,
                             lowercase = True, strip_accents = 'ascii', ngram_range = (2, 3),
                             stop_words = 'english', use_idf = True, token_pattern=r'\w{1,}')
tfidf_char = tfidf_vect.fit_transform(train_data_df.headline_cleaned).toarray()
# get all unique words in the corpus
vocab = tfidf_vect.get_feature_names()
# Rounded to 2 decimals purely for display.
tfidf_char = pd.DataFrame(np.round(tfidf_char, 2), columns = vocab)
tfidf_char.head()
# +
# Aggregate each character n-gram's TF-IDF mass across all headlines and plot
# the 50 largest.
word_count_dict = {}
for word in vocab:
    word_count_dict[word] = int(sum(tfidf_char.loc[:, word]))
counter = Counter(word_count_dict)
# BUG FIX: this module is imported as `pd` (see the imports cell); the bare
# name `pandas` was undefined here and raised NameError.
freq_df = pd.DataFrame.from_records(counter.most_common(50),
                                    columns=['Top 50 words', 'Frequency'])
freq_df.plot(kind='bar', x='Top 50 words');
# -
# ## Word Embedding
# Build the Corpus Vocabulary
# +
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(train_data_df.headline_cleaned)
word2id = tokenizer.word_index  # word -> integer id (keras ids start at 1)
# build vocabulary of unique words
word2id['PAD'] = 0  # reserve id 0 for padding
id2word = {v:k for k, v in word2id.items()}
# Each cleaned headline converted to its list of word ids.
wids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in train_data_df.headline_cleaned]
vocab_size = len(word2id)
embed_size = 100    # embedding dimensionality for the CBOW model
window_size = 2 # context window size
print('Vocabulary Size:', vocab_size)
print('Vocabulary Sample:', list(word2id.items())[:100])
# +
# Build a CBOW (context, target) generator
def generate_context_word_pairs(corpus, window_size, vocab_size):
    """Yield (context, target) training pairs for a CBOW model.

    For every word in every sentence of `corpus` (lists of word ids), the
    context is the ids within `window_size` positions on either side (clipped
    at sentence boundaries, the target position excluded), zero-padded to a
    fixed length; the target is the word's one-hot vector of size vocab_size.
    """
    context_length = window_size*2
    for words in corpus:
        sentence_length = len(words)
        for index, word in enumerate(words):
            context_words = []
            label_word = []
            start = index - window_size
            end = index + window_size + 1
            context_words.append([words[i]
                             for i in range(start, end)
                             if 0 <= i < sentence_length
                             and i != index])
            label_word.append(word)
            # Pad to 2*window_size ids and one-hot encode the target.
            x = sequence.pad_sequences(context_words, maxlen=context_length)
            y = np_utils.to_categorical(label_word, vocab_size)
            yield (x, y)
# Test this out for some samples
i = 0
for x, y in generate_context_word_pairs(corpus=wids, window_size=window_size, vocab_size=vocab_size):
    if 0 not in x[0]:
        # BUG FIX: NumPy is imported as `np` in this notebook; the bare name
        # `numpy` was undefined here and raised NameError.
        print('Context (X):', [id2word[w] for w in x[0]], '-> Target (Y):', id2word[np.argwhere(y[0])[0][0]])
        if i == 20:
            break
        i += 1
# +
# %% [markdown]
# ## Using gensim to build Word2Vec
# +
from gensim.models import word2vec
# tokenize sentences in corpus
wpt = nltk.WordPunctTokenizer()
# BUG FIX: iterating a DataFrame yields its column NAMES, not rows, so the
# original tokenised the column labels instead of the headlines. Iterate the
# cleaned-headline column, as every other cell in this notebook does.
tokenized_corpus = [wpt.tokenize(document) for document in train_data_df.headline_cleaned]
# Set values for various parameters
feature_size = 100    # Word vector dimensionality
window_context = 30          # Context window size
min_word_count = 1   # Minimum word count
sample = 1e-3   # Downsample setting for frequent words
# NOTE(review): `size=` and `iter=` are gensim 3.x parameter names; gensim 4+
# renamed them to vector_size= / epochs= -- confirm the installed version.
w2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size,
                              window=window_context, min_count=min_word_count,
                              sample=sample, iter=50)
# view similar words based on gensim's model
#similar_words = {search_term: [item[0] for item in w2v_model.wv.most_similar([search_term], topn=5)]
#                  for search_term in ['god', 'jesus', 'noah', 'egypt', #'john', 'gospel', 'moses','famine']}
#similar_words
# +
# %% [markdown]
# ## Visualize word embedding
# -
| feature-engineering - VK 12th April.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
def pythagorean_triple(limit):
    """Print Pythagorean triples (x, y, z) with x <= y, scanning increasing x
    until the last hypotenuse computed reaches `limit`."""
    hyp = 0
    leg_a = 2
    while hyp < limit:
        # For each first leg, try second legs from leg_a up to leg_a^2 - 1.
        for leg_b in range(leg_a, leg_a * leg_a):
            hyp = math.sqrt(leg_a * leg_a + leg_b * leg_b)
            # A whole-number hypotenuse means (leg_a, leg_b, hyp) is a triple.
            if hyp.is_integer():
                print(leg_a, leg_b, hyp)
        leg_a += 1
# -
# Demo: print the triples found before the scanned hypotenuse reaches 100.
pythagorean_triple(100)
def mean(l_list):
    """Return the arithmetic mean of the values in l_list."""
    total = 0
    for value in l_list:
        total += value
    return total / len(l_list)
def Variance(l_list):
    """Return the sample variance of l_list (squared deviations from the
    mean, divided by n - 1)."""
    # Arithmetic mean, inlined from the sibling mean() helper.
    center = sum(l_list) / len(l_list)
    squared_dev = 0
    for value in l_list:
        squared_dev += (center - value) ** 2
    return squared_dev / (len(l_list) - 1)
def std_dev(l_list):
    """Return the sample standard deviation of l_list, rounded to the
    nearest integer."""
    import math
    return round(math.sqrt(Variance(l_list)))
# Demo: sample standard deviation of an example list.
l_list = [600, 470, 170, 430, 300]
std_dev(l_list)
def compounded_num(compounded):
    """Return the number of compounding periods per year for a frequency name.

    Accepts 'annually', 'quarterly', 'monthly', 'weekly' or 'daily'
    ('annually' is a backward-compatible addition).  Raises ValueError for
    any other value; the original silently returned None, which surfaced
    later as a confusing TypeError inside compound_instrest.
    """
    periods_per_year = {
        'annually': 1,
        'quarterly': 4,
        'monthly': 12,
        'weekly': 52,
        'daily': 365,
    }
    try:
        return periods_per_year[compounded]
    except KeyError:
        raise ValueError(f'unknown compounding frequency: {compounded!r}') from None
def compound_instrest(p,d,r,c):
    """
    Return the future value of a compound-interest investment, A = p(1 + i)^n.

    P - principal investment amount
    d - number of years the money is invested
    r - the annual interest rate in percent (float)
    c - compounded quarterly,monthly,weekly,daily
    """
    periods_per_year = compounded_num(c)
    # BUG FIX: the periodic rate is the annual rate divided by the number of
    # compounding periods per year, NOT by the number of years `d` as the
    # original computed; also dropped the round(..., 5) that truncated the
    # growth factor and skewed the result.
    periodic_rate = 1 + (r / 100) / periods_per_year
    total_periods = periods_per_year * d
    return (periodic_rate ** total_periods) * p
# Demo: 5000 invested for 10 years at 5% annual interest, compounded monthly.
compound_instrest(5000,10,5,'monthly')
| notebooks/Assignment-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Try on your own input
# The following notebook shows how to practice with the code repository with you example text.
# **Pre-requisites**
# - Make sure you have installed and downloaded everything as the [README](https://github.com/dalab/end2end_neural_el#trying-the-system-on-random-user-input-text) mentions on the github page.
# - Once you follow the instructions you will have the server running at https://localhost:5555.
# - To quickly check that the server is up, run the following cell.
import requests, json
# An HTTP 200 response confirms the entity-linking service is listening.
requests.post("http://localhost:5555") ## if Response is [200] then it means the server is running
## Check using curl. (it is not nessasary to run the code)
# !curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' -d "{ \"text\": \"Obama will visit Germany and have a meeting with Merkel tomorrow.\", \"spans\": [] }" 'http://localhost:5555'
# ## Utility Functions
# +
import pprint
from IPython.display import Markdown
def query(text):
    """POST `text` to the local entity-linking service and return the decoded
    JSON response (presumably a list of [start, length, entity] spans, as
    consumed by format_index_output below)."""
    payload = {"text": text, "spans": []}
    response = requests.post("http://localhost:5555", json=payload)
    return json.loads(response.content.decode('utf-8'))
def printmd(string):
    """Render `string` as Markdown in the notebook's output cell."""
    ## displays the annotated/tagged input text in jupyter's Markdown format
    display(Markdown(string))
def format_index_output(text):
    ## main function which sends the input text to the service, gets the response back and formats the output
    ## in a presentable form to evaluate.
    """Annotate `text` with entity spans from the service and display it as
    Markdown, each span replaced by a link to its English Wikipedia page."""
    ents = query(text)
    # Sort spans by start offset, descending, so splicing a link into the
    # string never shifts the offsets of the spans still to be processed.
    ents.sort(key=lambda tup: tup[0], reverse=True)
    for i, ent in enumerate(ents):
        # ent[0] = start offset, ent[1] = span length, ent[2] = wiki title.
        text = text[:ent[0]] + '['+text[ent[0]:ent[0]+ent[1]]+'](https://en.wikipedia.org/wiki/'+ ent[2] +')' + text[ent[0]+ent[1]:]
    # Replace $ sign : Quick fix since $ sign is a keyword in jupyter markdowns
    text = text.replace("$","\\$")
    printmd(text)
# -
# Example passages of increasing length and complexity to run through the
# linker (the <NAME> placeholders come from the dataset's anonymisation).
text = ["Eminem is the best rapper of all time. MMLP was his best album, Eminem and Dre have produced this album",
"Pakistan has one of the best teams in cricket. The pakistani squad for ICC Cricket World Cup has <NAME> and <NAME>ar",
"KIEV: Separatist rebels have not fulfilled conditions like handing back border posts or laying down their weapons, Ukraine’s president said on Monday in a phone call with the leaders of Russia, Germany and France as he pondered whether to extend a ceasefire.The call between President <NAME>, Russia’s Vladimir Putin, Germany’s Chancellor <NAME> and France’s Francois Hollande took place as an expiration deadline neared for Ukraine’s shaky, unilateral ceasefire",
'''Brexit Party founder <NAME> who resigned from the party after posting a series of anti-Islam comments has backed Bor<NAME>’s Conservatives.
Ms Blaiklock, who set up the party and registered its name, accused <NAME> of going on a “monumental ego trip” and said his general election strategy had been a “disaster”.
“Nigel has failed catastrophically,” she told The Sun newspaper. “You have to compromise. If you want Brexit, you must vote Tory."
But opposition parties at Westminster seized on the endorsement, and the Liberal Democrat deputy leader <NAME> said: “Catherine joins a long list of unsavoury characters, including <NAME>, who are now backing <NAME>.”
He added: “The fact that Sir John Major, <NAME> and others are urging voters to keep the Tories out of power shows quite how far this Conservative party has sunk.”
<NAME> resigned from the Brexit Party earlier this year after a series of anti-Islam messages were uncovered by the Hope Not Hate organisation, which monitors the the far-right.
According to The Guardian, one of the messages shared by <NAME> was from a former BNP acivtist which referred to “white genocide” while one of her own remarks read: “Islam = submission – mostly raping men it seems”.'''
]
# Annotate and display each example in turn.
for t in text:
    format_index_output(t)
| Examples _ End-to-End Neural Entity Linking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lekcja 9-10: Funkcje
# ## Spis treści
# 1. Co to jest funkcja?
#
#
# - 1.1. Funkcja jako "maszynka"
#
#
# - 1.2. Do czego służą funkcje?
# - Modularność
# - Prostota
# - Nazewnictwo funkcji
#
#
# - 1.3. Wywoływanie funkcji
# - Przykład: Funkcje do konwersji typów
# - Przykład: Funkcje wbudowane
# - Przykład: Metody to też funkcje!
# - Przykład: Podstawowe funkcje matematyczne
#
#
# 2. Definiowanie funkcji
#
#
# - 2.1. `def`
# - Ogólna składnia
# - Uwaga techniczna: `def` jest stwierdzeniem wykonywalnym
#
#
# - 2.2. Wariacje na temat `return`
# - Funkcje zwracające wiele rezultatów jednocześnie
# - Wielokrotny `return`
# - Funkcje nic nie zwracające
# - Instrukcja `pass`
# - Funkcje czyste i modyfikatory
#
#
# - 2.3. Wariacje na temat argumentów
# - Przekazywanie argumentów do funkcji
# - Argumenty pozycyjne (wymagane)
# - Argumenty nazwane
# - X
# - X
# - X
# - X
#
#
#
#
#
#
# ## 1. Co to jest funkcja?
# ### 1.1. Funkcja jako "maszynka"
# <img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/function.png'>Aby wprowadzić pojęcie **funkcji** ("function"), powinniśmy na chwilę wrócić do szkolnej matematyki.
#
# Ze szkoły na pewno pamiętasz wyrażenia typu $f(x) = 2x + 5$. Mamy tu funkcję o **nazwie** ("name") $f$, która przyjmuje **argument** ("argument") o nazwie $x$, będący liczbą, a **zwraca** ("returns") wynik, w tym przypadku rezultat obliczenia $2x + 5$. Podstawiając za $x$ konkretną liczbę, np. $x = 7$, otrzymujemy konkretny liczbowy rezultat, tutaj: $f(7) = 2 \cdot 7 + 5 = 19$.
#
# Można zatem funkcję wyobrazić sobie jako swoistą "maszynkę", do której z jednej strony wrzucamy "składniki"/argumenty, na których następnie "maszynka" dokonuje serię zdefiniowanych operacji, aby z drugiej strony wyrzucić "gotowy produkt"/rezultat.
#
# Funkcje w Pythonie generalnie działają dokładnie w ten sam sposób: należy dostarczyć im argumenty, na których przeprowadzane są wcześniej zdefiniowane operacje, a zwracany jest wynik tych operacji. Mogą to zrobić jednak w sposób dużo bardziej wszechstronny niż funkcje matematyczne!
# ### 1.2. Do czego służą funkcje?
# #### Modularność
# Z punktu widzenia programistycznego, taka "maszynka" to _wyodrębniony blok kodu, który wykonuje konkretnie zdefiniowaną czynność._ I jak to "maszynka", ma ona dwa podstawowe zastosowania:
#
# 1. Duże maszyny składają się z mniejszych elementów, a te z jeszcze mniejszych elementów itd. Dzięki funkcjom możemy rozbić duży, złożony problem na wiele małych, dobrze określonych, łatwiejszych do zrozumienia i zakodowania części ("procedural decomposition"). Np. programując robota przygotowującego pizzę, zamiast jednej długiej procedury `make_pizza`, lepiej rozbić ją na logiczne kawałki, `mix_dough`, `roll_out` , `add_toppings`, `bake` itp. Łatwiej takim kodem zarządzać, łatwiej szukać w nim błędów, łatwiej go skalować. Funkcje implementują zatem zasadę ["DRY = Don't Repeat Yourself"](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
#
# 2. Maszyny wyręczają nas w powtarzalnych czynnościach. Funkcja opisuje konkretną czynność, a zatem jest naturalnym obiektem wielokrotnego użytku - definiujemy ją raz, a możemy wywoływać wielokrotnie ("code reusability"). Również jeśli tę czynność chcemy później zmodyfikować, robimy to tylko raz - w ciele funkcji - a nie w każdym miejscu jej użycia. Co więcej, użyteczna funkcja może przydać się w kolejnych naszych projektach, nie tylko w danym programie.
#
# A zatem funkcje są bardzo uniwersalnym i podstawowym narzędziem strukturyzującym kod.
# #### Prostota
# Od razu praktyczna uwaga: Każda nasza funkcyjna "maszynka" powinna:
#
# - być w miarę prosta i czytelna; ważnym kryterium jest po prostu liczba linijek kodu składająca się na funkcję - powinna ona być w miarę niska;
#
# - nie robić zbyt wielu rzeczy naraz - a najlepiej tylko jedną rzecz!
#
# Tylko wówczas będzie dobrze spełniać swoją rolę strukturyzowania kodu i unikania powtórzeń.
#
# Jeśli w trakcie pisania programu stworzyliśmy zbyt długą i przez to niezbyt zrozumiałą funkcję, spróbujmy przepisać ją na serię mniejszych kawałków - robiących koniec końców to samo, ale w bardziej czytelnej formie. Proces takiego przepisania nazywa się **"refactoring"**.
# #### Nazewnictwo funkcji
# Dobrą praktyką programowania jest nadawanie funkcjom nazw złożonych z pełnych wyrazów angielskich, które dobrze opisują, co dana funkcja robi. Jest to inaczej niż w matematyce, gdzie funkcje zwykle nazywa się pojedynczymy literami, $f$, $g$ itp.
#
# - Dozwolone są wielkie i małe litery, cyfry, oraz podkreślnik `_`, z tym że nazwa nie może zaczynać się od cyfry. Nazwą nie może być też słowo zarezerwowane w Pythonie, takie jak `list`, `return` itp. - one już oznaczają coś konkretnego.
#
# - Nie powinno się stosować skrótów, jako że utrudniają one zrozumienie nazwy przez osobę inną niż autor danego kodu.
#
# - Większość edytorów ma funkcję "auto-complete", a więc nawet jeśli nazwa funkcji będzie długa, to wystarczy wpisać ją w całości tylko raz, a później używać automatycznego uzupełniania, tj. zacząć pisać nazwę, a edytor dokończy ją za nas, po naciśnięciu odpowiedniego klawisza, np. `Tab`.
# ### 1.3. Wywoływanie funkcji
# Zobaczmy najpierw kilka przykładów funkcji. Zanim nauczymy się "konstruować"/definiować własne "maszynki"/funkcje, zobaczmy jak je "uruchamiać"/**wywoływać** ("call"). Podobnie jak w matematyce - np. $f(x)$ - robimy to poprzez nazwę funkcji i umieszczenie wartości jej argumentów w nawiasach okrągłych.
# #### Przykład: Funkcje do konwersji typów
# Jedną z pierwszych poznanych przez nas funkcji była funkcja `type`, która przyjmuje jeden argument - dowolny obiekt - a zwraca jego typ danych, np.:
type( 55 )
type( 'I love functions!' )
type( { 'name' : 'John' , 'age' : 22 } )
# Wielokrotnie też spotykaliśmy się z funkcjami służącymi do konwersji typów. W czasie tego kursu poznaliśmy wiele różnych rodzajów - **typów** ("types") - obiektów, np. liczby całkowite (typ `int`), liczby zmiennoprzecinkowe (typ `float`), stringi (typ `str`), wartości logiczne (typ `bool`), listy (typ `list`), tuple (typ `tuple`), zbiory (typ `set`), słowniki (typ `dict`) itd. Funkcje do konwersji typów mają takie same nazwy, jak typ, _na jaki_ chcemy konwertować. Np.:
str( 55 ) # konwersja int na str
int( '55' ) # konwersja str na int
list( 'abc' )
set( [ 1 , 2 , 3 , 1 , 2 , 3 ] )
bool( '' )
# #### Przykład: Funkcje wbudowane
# W Pythonie mamy oczywiście wiele funkcji, które ktoś wcześniej dla nas zdefiniował - są to tzw. **funkcje wbudowane** ("built-in functions"), tu jest ich [spis](https://docs.python.org/3/library/functions.html). Część z nich już często używaliśmy, choćby `len`, `sum`, `max`, `min`; mają one jeden argument, będący kolekcją, a zwracają odpowiednio długość, sumę elementów i maksymalny/minimalny element kolekcji (jeśli pytanie o to ma sens). Np.:
len( { 'name' : 'John' , 'age' : 22 } )
sum( [ int( digit ) for digit in str( 12345 ) ] )
max( 'honorificabilitudinitatibus' )
# ... itd.
# Kilka innych przykładów: Funkcja wbudowana o nazwie `abs` przyjmuje jeden argument liczbowy i zwraca jego wartość bezwzględną:
abs( -7.8 )
# Funkcja wbudowana `pow` przyjmuje dwa argumenty i zwraca pierwszy z nich podniesiony do potęgi równej drugiemu z nich:
pow( 4 , 2 )
# Poznaliśmy też funkcję `range` i pamiętamy, iż może ona być wywołana z jednym, dwoma albo trzema argumentami, a zwraca odpowiedni ciąg liczb całkowitych, który po konwersji na listę wygląda tak:
list( range( 10 ) )
list( range( 5 , 10 ) )
list( range( 5 , 10 , 2 ) )
# Zauważmy tu dwie rzeczy: Po pierwsze, liczba argumentów funkcji `range` waha się od jednego do trzech, ni mniej, ni więcej - zobaczymy później, jak definiować funkcje, które mogą przyjmować _różną liczbę argumentów_. Po drugie, wywołaliśmy tu funkcję `list` z argumentem będącym rezultatem wywołania funkcji `range`.
# Poznaliśmy też funkcję `enumerate`, przyjmującą jako argument kolekcję uporządkowaną - np. string, listę, czy tuplę - a zwracającą listę tupli 2-elementowych:
list( enumerate( 'abc' ) )
# Inna funkcja wbudowana, `zip`, może przyjmować _dowolną_ liczbę argumentów będących kolekcjami uporządkowanymi i zwraca listę tupli zawierających kolejno ich pierwsze elementy, ich drugie elementy itd.
list( zip( 'quiz' , 'hazy' , 'jack' , 'lazy' , 'haze' ) )
# Również później zobaczymy, jak definiować funkcje, które mogą przyjmować _nieokreśloną z góry liczbę argumentów_.
# #### Przykład: Metody to też funkcje!
# W poprzednich Lekcjach wielokrotnie używaliśmy funkcji, ale także metod - i nigdy dokładnie nie wytłumaczyliśmy różnic między nimi. Są to pojęcia bardzo podobne - metoda to także funkcja, lecz "powiązana" z określonym typem danych. Mamy więc metody powiązane z typem danych `str`, takie jak `lower`, `join`, `replace` itd. (zob. Lekcja 2). Mamy metody powiązane z typem danych `list`, jak `index`, `count`, `append` (zob. Lekcja 5). Mamy metody powiązane z typem danych `dict`, jak `get` czy `update` (zob. Lekcja 8). Itd.
#
# Składnia wywoływania jest też nieco inna: Funkcje wywołujemy jak wyżej, tj. nazwa funkcji i argumenty w nawiasach okrągłych:
# ```
# function_name(arguments)
# ```
# Metodę natomiast - powiązaną z określonym typem danym - wywołujemy po kropce od obiektu tego typu, po której następuje nazwa metody i nawiasy okrągłe z ewentualnymi innymi argumentami (albo puste w środku, gdy tych argumentów nie ma):
# ```
# obj.method_name(other_arguments)
# ```
# Choć nie robi się tego w praktyce, można metodę wywołać identyczną składnią, jak funkcję - lecz trzeba wówczas:
# - podać jej "pełną" nazwę składającą się z nazwy typu, kropki i nazwy metody, np. metoda `count` typu danych `list` nazywa się w pełni `list.count`;
# - obiekt `obj`, na którym wywołujemy metodę, podać jako pierwszy argument w nawiasach okrągłych, a po nim dopiero ewentualną resztę argumentów.
#
# Zatem:
# ```
# type_name.method_name(obj, other_arguments)
# ```
# Np. zamiast:
'ABC'.lower()
# ... możemy równie dobrze napisać:
str.lower( 'ABC' )
# Zamiast:
', '.join( [ 'one' , 'two' , 'three' ] )
# ... możemy napisać:
str.join( ', ' , [ 'one' , 'two' , 'three' ] )
# Zamiast:
# +
lst = [ 3 , 1 , 3 , 3 , 3 , 2 , 1 , 2 , 3 ]
lst.count( 3 )
# -
# ... możemy napisać:
list.count( lst , 3 )
# Zamiast:
# +
d = { 'a' : 1 , 'b' : 2 }
d.get( 'c' , -1 )
# -
# ... możemy napisać:
dict.get( d , 'c' , -1 )
# I tak dalej. To pokazuje, że metody też są funkcjami - ale szczególnymi, bo powiązanymi z konkretnym typem danych, zdefiniowanymi w treści definicji tego typu.
# #### Przykład: Podstawowe funkcje matematyczne
# Innym przykładem są typowe funkcje matematyczne, od których zaczęliśmy tę lekcję. Nie są one częścią biblioteki standardowej Pythona, ale możemy je **zaimportować** ("import") z **modułu** ("module") o nazwie `math`. Moduł to po prostu plik, który ktoś wcześniej przygotował, a który zawiera m.in. definicje pewnych funkcji. Importowanie oznacza spojrzenie do tego pliku i użycie definicji funkcji tam zawartych. Programowanie w Pythonie najczęściej polega na pracy z konkretnymi modułami zawierającymi przydatne nam rozwiązania; większość rzeczy ktoś już gdzieś napisał! Codzienna praca z Pythona polega więc często na wyszukiwaniu w sieci potrzebnych nam modułów i zapoznawaniu się z ich dokumentacją; proste wyszukanie pozwala nam np. stwierdzić, że opis funkcji zawartych w module `math` znajduje się [tutaj](https://docs.python.org/3/library/math.html).
# Zaimportować moduł możemy w całości, co daje nam dostęp do wszystkich funkcji w nim zdefiniowanych. Piszemy wtedy np.:
# ```
# import math
# ```
# co daje nam dostęp do wszystkich funkcji w module o nazwie `math`. Trzeba wówczas jednak pamiętać o następującej zasadzie: wywołanie funkcji odbywa się nie tylko poprzez nazwę funkcji, ale nazwa ta musi poprzedzona być nazwą modułu oraz kropką. Przykładem funkcji w module `math` jest sinus, `sin`, a więc wywołując ją musimy użyć pełnej nazwy `math.sin`.
# +
import math # importujemy moduł math, który jest plikiem zawierającym definicje wielu funkcji matematycznych
math.sin( 0.87 ) # wywołanie funkcji math.sin, a więc funkcji sin z modułu math
# -
# Import możemy przeprowadzić również w sposób selektywny, a więc zaimportować z danego modułu jedynie potrzebne nam funkcje, a nie cały plik. Piszemy wówczas np.:
# ```
# from math import sin , cos , tan
# ```
# i wtedy do wywołania funkcji używamy jej nazwy _bez_ poprzedzającej nazwy modułu.
# +
from math import sin , cos , tan # z modułu math importujemy tylko kilka funkcji (i nie mamy dostępu do żadnych innych!)
tan( 0.87 ) * cos( 1.15 ) / sin( - 3.45 ) # teraz wywołanie odbywa się po prostu przy użyciu samej nazwy funkcji; nie: math.tan!
# -
# ## 2. Definiowanie funkcji
# ### 2.1. `def`
# #### Ogólna składnia
# Oprócz funkcji wbudowanych (np. `abs`, `len`, `sum`, ...), lub też takich, które importujemy z jakiegoś modułu (np. `math.sin`), możemy **definiować własne funkcje** ("user-defined functions", UDFs). Mając w pamięci analogię z "maszynką", która przetwarza argumenty i "wypluwa" wynik, z programistycznego punktu widzenia **definicja funkcji** ("function definition") będzie osobnym blokiem kodu, który:
#
# - nadaje funkcji nazwę;
#
# - zawiera listę argumentów, jakie funkcja przyjmuje;
#
# - definiuje operacje, jakie "maszynka" wykonuje na swoich argumentach;
#
# - definiuje końcowy rezultat, jaki funkcja zwraca.
#
# Ogólna składnia zbiera te wszystkie elementy, rozpoczynając od słowa kluczowego `def`, i zwykle wygląda tak:
#
# ```
# def function_name(arguments):
# # various operations
# return result
#
# ```
# Pierwsza linijka - z nazwą `function_name` i argumentami `arguments` - nazywa się **nagłówkiem** ("header"), a cała treść wykonywanych operacji to **ciało** ("body") funkcji, które zawiera w szczególności - po słowie kluczowym `return` - rezultat `result`, który funkcja **zwraca** ("returns"); ciało funkcji określone jest przez wcięcie kodu, identycznie jak przy instrukcjach warunkowych czy pętlach.
# Dla przykładu, prosta funkcja matematyczna z początku lekcji może być zdefiniowana jako:
def f( x ): # definition of a function named f with a single parameter x
    # function body
    y = 2 * x + 5 # the argument x is transformed into y
    return y # y is the result the function returns
# ... a jej wywołanie jest:
f( 7 ) # wywołanie funkcji o nazwie f z argumentem o wartości 7
# Ta funkcja jest tak prosta, że całe wyrażenie obliczające rezultat możemy zawrzeć w linijce `return` bez ryzyka uczynienia kodu nieczytelnym.
def f( x ):
    # Short form: the whole computation fits into the return expression.
    return 2 * x + 5
# Zobaczmy inny przykład takiej "krótkiej" funkcji - a więc tylko `return` i jedno wyrażenie: Napiszmy funkcję, która ma dwa argumenty: listę `lst` oraz pewną dodatnią liczbę całkowitą `limit`. Niech zwraca ona wartość logiczną `True`/`False` odpowiadającą na pytanie: czy długość listy `lst` jest większa lub równa od liczby `limit`. Długość listy `lst` dana jest - jak wiemy - poprzez wywołanie funkcji `len`, tj. `len(lst)`. Operator porównania daje tę wartość logiczną, np.:
len( [ 1 , 2 , 3 ] ) >= 10
# ... zatem ta funkcja miałaby po prostu postać:
def is_long( lst , limit ):
    # True when the list lst has at least `limit` elements.
    return limit <= len( lst )
is_long( [ 1 , 2 , 3 ] , 10 )
# Funkcja nie musi mieć żadnych argumentów! Pamiętajmy jednak oczywiście o nawiasach okrągłych - zarówno w definicji takiej funkcji, jak i w jej wywołaniu!
def say_hello():
    """Return a fixed greeting string; note the empty parameter list."""
    message = 'Hello!'
    return message
say_hello()
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 1:
#
# (a) Napisz funkcję, która przyjmuje jako argument temperaturę w stopniach Fahrenheita i przelicza ją na stopnie Celsjusza, zgodnie ze wzorem $C = (F - 32) \cdot 5/9$.
#
# (b) Napisz funkcję, która oblicza wskaźnik masy ciała BMI (waga/wzrost$^2$) na podstawie wagi (w kilogramach) i wzrostu (w metrach).
#
# (c) Niedawnym odkryciem amerykańskich naukowców jest nowy wzór przeliczający wiek psa (w latach) $d$ na odpowiadający mu wiek człowieka $h$; ma on postać: $h = 16 \ln(d) + 31$, gdzie $\ln$ to tzw. logarytm naturalny (nie przejmuj się, jeśli to nieznane pojęcie). Zaimportuj funkcję `log` z modułu `math` (implementuje ona logarytm naturalny), a następnie napisz krótką funkcję dokonującą tego przeliczenia.
#
# (d) Napisz krótką funkcję (nazwaną powiedzmy `is_palindrome`), która zwraca wartość logiczną `True`/`False` odpowiadającą na pytanie, czy jej argument - będący stringiem - jest palindromem, a więc jest identyczny do swojego lustrzanego odbicia.
# +
# szybkie ćwiczenie 1a - rozwiązanie
# +
# szybkie ćwiczenie 1b - rozwiązanie
# +
# szybkie ćwiczenie 1c - rozwiązanie
# +
# szybkie ćwiczenie 1d - rozwiązanie
# -
# Wszystkie powyższe funkcje były "krótkie" w tym sensie, że dało się je zapisać jako jedno wyrażenie po słowie `return`. Operacje, które funkcja wykonuje, mogą jednakże być dużo bardziej skomplikowane i rozciągać się na wiele linii kodu w ciele funkcji - zob. niżej.
# #### Uwaga techniczna: `def` jest stwierdzeniem wykonywalnym
# `def` jest tzw. stwierdzeniem wykonywalnym ("executable statement"), co oznacza, iż funkcja zaczyna istnieć dopiero wtedy, kiedy Python przy wykonywaniu kodu ("runtime") dochodzi do miejsca z definicją `def`.
# Jedną z implikacji jest to, że funkcję można do woli **redefiniować**, np.:
# +
def f( a , b ): # the function f starts to exist when Python reaches this line
    return a * b
print( f( 5 , 7 ) )
def f( a , b ): # reaching this def creates a new function; the name f now refers to it, not to the previous one
    return a + b
print( f( 5 , 7 ) )
# -
# Inną implikacją jest to, że `def` może pojawić się w dowolnym miejscu w kodzie, gdzie jakiekolwiek inne stwierdzenie mogłoby się pojawić - np. w instrukcji warunkowej:
# +
flag = False
# Which definition of rescale exists depends on the runtime value of flag:
if flag:
    def rescale( x ):
        return 10 * x
else:
    def rescale( x ):
        return x / 10
rescale( 3 )
# -
# ... czy pętli. Może być też zagnieżdżone w innym stwierdzeniu `def`:
def cross_out_all_proper_names( text ):
    """Censor a sentence: words starting with an uppercase letter become
    '-----' (keeping any trailing punctuation) and purely numeric words
    become '?????'; everything else is left untouched."""
    # Helper defined inside this function's body - it is not visible
    # (and so cannot be reused) anywhere else.
    def cross_out_proper_name( word ):
        if word[ 0 ].isupper():
            censored = '-----'
            trailing = word[ -1 ]
            if not trailing.isalpha():
                censored = censored + trailing
            return censored
        if word.isnumeric():
            return '?????'
        return word
    # Transform every whitespace-separated word, then glue them back together.
    return ' '.join( cross_out_proper_name( word ) for word in text.split() )
cross_out_all_proper_names( '<NAME>, together with Gandalf and 13 dwarves, left the Shire in the year of 2941 for a quest to the Lonely Mountain to reclaim the treasure of the dragon Smaug.' )
# Oczywiście, możemy zadać pytanie o _sens_ robienia tego w ten sposób - tu np. nic nie stoi na przeszkodzie, aby funkcję `cross_out_proper_name` zdefiniować wcześniej, a potem tylko wywołać w ciele funkcji `cross_out_all_proper_names`. Wyobraźmy sobie jednak, że nie interesuje nas używanie funkcji `cross_out_proper_name` w innych kontekstach - wówczas "chowając" jej definicję wewnątrz funkcji `cross_out_all_proper_names` chronimy się np. przed jej ponownym użyciem gdzie indziej, co może być niepożądane. Zatem decyzję o sensie tej konstrukcji musi podjąć programista - tu pokazujemy tylko, iż jest możliwe zagnieżdżanie stwierdzeń `def`.
# Z drugiej strony, `def` będące stwierdzeniem oznacza, że nie możemy napisać np. listy zawierającej elementy ze słowami `def`:
# ```
# [ def f1(x): ... , def f2(x): ... , def f3(x): ... ] # niepoprawnie!
# ```
# Te funkcje musimy wcześniej zdefiniować, a potem w liście umieścić tylko ich nazwy:
# ```
# [f1, f2, f3]
# ```
# Np.:
# +
def cm_to_in( cm ):
    # centimetres -> inches
    return cm / 2.54
def cm_to_ft( cm ):
    # centimetres -> feet
    return cm / 30.48
def cm_to_yards( cm ):
    # centimetres -> yards
    return cm / 91.44
# Functions are objects: their names can be put in a list and iterated over.
for convert_cm in [ cm_to_in , cm_to_ft , cm_to_yards ]:
    print( convert_cm( 50 ) )
# -
# ### 2.2. Wariacje na temat `return`
# #### Funkcje zwracające wiele rezultatów jednocześnie
# Nic oczywiście nie stoi na przeszkodzie, aby funkcja zwracała _kolekcję_ obiektów, a więc kilka wartości jednocześnie. Poznaliśmy już kilka rodzajów kolekcji - listy, tuple, zbiory, słowniki - i każda z nich jest dobrym sposobem, aby zwrócić wiele wartości.
# Napiszmy prostą funkcję, która dla danego stringu zwraca jego pierwszą i ostatnią literę, a także jego długość - i robi to w postaci słownika:
# +
def word_stat( text ):
    # Return several values at once, packaged as a dictionary.
    stats = {
        'first letter' : text[ 0 ] ,
        'last letter' : text[ -1 ] ,
        'length' : len( text )
    }
    return stats
word_stat( 'Chatham' )
# -
# Najczęstszym wyborem jest jednak tupla; w składni możemy pominąć nawiasy okrągłe (pakowanie tupli):
# +
def word_stat( text ):
    # Pack the three results into a tuple (the parentheses are optional).
    stats = ( text[ 0 ] , text[ -1 ] , len( text ) )
    return stats
word_stat( 'Chatham' )
# -
# ... lub nawet:
# +
def word_stat( text ):
    # Shortest form: build and return the tuple in a single expression.
    return text[ 0 ] , text[ -1 ] , len( text )
word_stat( 'Chatham' )
# -
# Chcąc przypisać wynik takiej funkcji do zmiennej, przypisujemy go do tupli tej samej długości - i również możemy pominąć nawiasy okrągłe (wypakowywanie tupli):
# +
first , last , length = word_stat( 'Chatham' )
print( first )
print( last )
print( length )
# -
# Nie ma tu żadnej magii - funkcja może zwracać obiekt dowolnego typu, w szczególności tuplę czy słownik. Tupla będzie pewnie lepszym wyborem dla funkcji zwracajacych niewielką liczbę wartości, zaś słownik - gdy jest ich więcej i potrzebny jest opis tego, co która wartość znaczy.
# #### Wielokrotny `return`
# Stwierdzenie `return` może pojawić się nie tylko na końcu ciała funkcji - jak do tej pory - ale w dowolnym jego miejscu, a nawet wielokrotnie! Zasada jest taka, że w momencie, w którym Python _pierwszy raz_ dochodzi do któregoś stwierdzenia `return`, wykonywanie funkcji jest natychmiast przerywane i dany rezultat zwracany.
# Dla przykładu, [transformację Collatza](https://en.wikipedia.org/wiki/Collatz_conjecture) (mówiliśmy o niej w Lekcji 4 przy okazji pętli `while`) możemy zapisać "klasycznie":
def collatz( n ):
    # One Collatz step: n // 2 for even n, 3n + 1 for odd n.
    # "Classic" style: compute first, then a single return at the end.
    transformed = n // 2 if n % 2 == 0 else 3 * n + 1
    return transformed
collatz( 19 )
# ... lub też umieszczając `return` na różnych ścieżkach instrukcji warunkowej:
def collatz( n ):
    # A return on each branch: whichever is reached first ends the call.
    if n % 2 == 0:
        return n // 2
    else:
        return 3 * n + 1
collatz( 19 )
# Inny przykład - napiszmy funkcję obliczającą najmniejszy dzielnik danej liczby naturalnej `n`:
def min_divisor( n ):
    # Smallest divisor >= 2 of a natural number n (n itself when n is prime);
    # returns an error message for anything that is not an integer >= 2.
    if not ( type( n ) is int and n >= 2 ):
        return 'Error!'
    for candidate in range( 2 , n + 1 ):
        if n % candidate == 0:
            return candidate
min_divisor( 187 )
min_divisor( '187' )
# #### Funkcje czyste i modyfikatory
# W powyższych przykładach ciało funkcji kończy się zawsze stwierdzeniem `return`, po którym następuje wyrażenie, jakie funkcja zwraca. Nie jest to jednak konieczność - funkcja może nic nie zwracać! Co zatem nasza "maszynka" - po wrzuceniu do niej "składników" - robi? Ogólnie mówiąc, może zmieniać elementy swojego "otoczenia" - mieć tzw. **efekty uboczne** ("side effects") - np.:
#
# - drukować jakąś wiadomość;
#
# - modyfikować jakiś obiekt istniejący poza funkcją.
#
# Funkcje, z jakimi mieliśmy do tej pory do czynienia, nic nie drukowały, nie zmieniały też nic w swoim otoczeniu, a po prostu produkowały jakiś rezultat - są one nazywane **funkcjami czystymi** ("pure functions"). Odwrotnie, funkcje mające efekty uboczne w postaci np. drukowania lub modyfikacji czegoś zewnętrznego to tzw. **modyfikatory** ("modifiers").
#
# Modyfikatory mogą wprowadzać pewne zamieszanie, gdyż należy pamiętać, co i jak modyfikujemy. Niektóre języki programowania dopuszczają tylko czyste funkcje - to tzw. **programowanie funkcyjne** ("functional programming") - i są pewne dowody na to, że programowanie funkcyjne jest szybsze i mniej podatne na błędy. Modyfikatory bywają jednak użyteczne!
# #### Funkcje nic nie zwracające
# Najprostszym przykładem funkcji nie zwracającej żadnej wartości a mającej efekty uboczne jest funkcja, która coś drukuje:
# +
def greeting( name ):
    # Modifier: its only effect is printing a message; nothing is returned
    # (so the call evaluates to None).
    message = 'Hello, dear ' + name + '!'
    print( message )
greeting( 'Basia' )
# -
# Zauważmy brak linijki z `return`! Tak naprawdę, nie jest to do końca prawda: formalnie rzecz biorąc, funkcja nie posiadająca stwierdzenia `return` zwraca tzw. wartość `None`. Sprawdźmy to, próbując przypisać wynik powyższej funkcji do zmiennej:
g = greeting( 'Joasia' )
g
g is None
# Kiedy nasza funkcja nie ma słowa `return`, Python automatycznie dodaje linijkę `return None`. Sami moglibyśmy to zrobić, aby podkreślić, że taka funkcja jednak coś zwraca - mianowicie `None`:
# +
def greeting( name ):
    # Explicit `return None` spells out what Python would do implicitly.
    print( 'Hello, dear ' + name + '!' )
    return None
greeting( 'Basia' )
# -
# (Moglibyśmy też po prostu napisać `return` bez niczego i efekt byłby ten sam.)
# Tak napisana funkcja jest modyfikatorem - modyfikuje swoje "otoczenie" poprzez wydrukowanie wiadomości. Moglibyśmy przepisać ją w formie funkcji czystej następująco:
def greeting_pure( name ):
    # Pure counterpart of greeting: no printing, just a returned string.
    pieces = [ 'Hello, dear ' , name , '!' ]
    return ''.join( pieces )
# Funkcja ta nic nie modyfikuje, jedynie zwraca jakiś rezultat. Możemy sprawdzić przypisując wynik jej wywołania do zmiennej:
# +
p = greeting_pure( 'Basia' )
p
# -
p is None
# Dobrym pomysłem może być unikanie modyfikatorów jeśli nie ma istotnej potrzeby by ich użyć.
# #### Instrukcja `pass`
# Wspomnijmy w tym miejscu o słowie kluczowym `pass` - oznacza ono, "nic nie rób".
def nothing():
    # A body cannot be empty; `pass` means "do nothing" (the call returns None).
    pass
nothing()
nothing() is None
# Jednym z zastosowań jest prototypowanie kodu - definiujemy funkcję, która na razie nic nie robi, dopiero później wypełnimy ją treścią. _Ciało funkcji nie może być puste, musi być wciętym blokiem kodu_, zatem jedynym sposobem, aby nic nie robiło, jest użycie `pass`.
# Ta sama zasada obowiązuje dla innych wciętych bloków kodu - nie mogą być puste - np. w instrukcjach warunkowych, czy pętlach, i wtedy również możemy użyć `pass`. Np. zamiast pisać:
# +
x = 10
if x > 1000:
print( f'{x} is large!' )
# -
# ... moglibyśmy napisać instrukcję warunkową obejmującą wszystkie możliwe przypadki:
# +
x = 10
if x > 1000:
print( f'{x} is large!' )
else:
pass
# -
# ### 2.3. Wariacje na temat argumentów
# #### Przekazywanie argumentów do funkcji
# Zdefiniowawszy jakąś funkcję, np.:
def greeting_function( greeting , name ):
    # Two required (positional) parameters; prints a message, returns None.
    print( f'{greeting}, {name}!' )
# ... wywołujemy ją przez **przekazanie** ("pass") jej argumentów w nawiasach okrągłych:
greeting_function( 'Hello' , 'Asia' ) # chcemy, aby argument greeting miał wartość 'Hello', a name wartość 'Asia'
# Mówiąc bardziej precyzyjnie:
#
# - `greeting` i `name` w _definicji_ funkcji to tzw. **parametry**, inaczej **parametry formalne**; zachowują się one jak _zmienne_ zdefiniowane w ciele funkcji;
#
# - kiedy zaś _wywołujemy_ funkcję, to w nawiasach okrągłych przekazujemy tzw. **argumenty**, inaczej **parametry faktyczne**, czyli obiekty, na których to konkretnie ma być wywołana funkcja; tutaj zatem argumentami są obiekty typu string, `'Hello'` i `'Asia'`.
#
# Kiedy wywołujemy funkcję z jakimiś argumentami, następuje tzw. **powiązanie** ("binding") argumentów do parametrów, analogicznie do zwykłego przypisania obiektu do zmiennej:
# ```
# greeting = 'Hello'
# name = 'Asia'
# ```
# Innymi słowy, w ciele funkcji utworzyliśmy zmienne lokalne `greeting` i `name`, które są teraz referencjami ("etykietami") do obiektów `'Hello'` i `'Asia'`.
# #### Argumenty pozycyjne (wymagane)
# Jest dość intuicyjne, że argumenty powinniśmy przekazywać do funkcji _w tej samej kolejności_, jak ustawione są parametry. Innymi słowy, powiązanie parametrów z argumentami następuje wedle kolejności. Pierwszy argument `'Hello'` jest powiązany z parametrem `greeting`, a drugi argument `'Asia'` z parametrem `name`.
#
# Wywoławszy funkcję z inną kolejnością argumentów niż kolejność parametrów doprowadzi rzecz jasna do niepożądanych efektów:
greeting_function( 'Asia' , 'Hello' )
# Mówimy, że są to **argumenty pozycyjne** ("positional arguments"), jako że pozycja argumentów w wywołaniu funkcji musi odpowiadać pozycji parametrów w definicji funkcji.
# Co więcej, także _liczba_ argumentów musi być dokładnie taka, jak parametrów:
greeting_function( 'Hello' )
# Z tego powodu argumenty pozycyjne nazywane są także **argumentami wymaganymi** ("required arguments").
# Argumenty pozycyjne (wymagane) to najprostszy sposób przekazania argumentów do funkcji, lecz w praktyce mogący sprawiać problemy:
#
# - Programista wywołujący funkcję musi dokładnie znać kolejność parametrów w definicji funkcji, co może prowadzić do pomyłek jeśli tych parametrów jest sporo, a sama definicja funkcji jest napisana gdzieś "daleko" (w zupełnie innym fragmencie kodu, czy też w innym module).
#
# - Może to sprawić też problemy z czytelnością kodu z wywołaniem funkcji. Jeśli przeczytasz gdzieś `some_func(13, -1, True, 'all', 0.001)`, to nie jest prosto pamiętać, jakiemu parametrowi ma odpowiadać wartość `13`, a jakiemu `True` itd., bez dokładnego studiowania definicji funkcji. Wywoływanie funkcji z wieloma argumentami pozycyjnymi jest zupełnie nieczytelne!
# #### Argumenty nazwane
# Na szczęście funkcję można wywoływać za pomocą tzw. **argumentów nazwanych** ("keyword arguments"): powiązanie argumentów z parametrami nie następuje wtedy wedle kolejności, lecz poprzez składnię `parameter = argument`, czyli poprzez odwołanie się do parametrów za pomocą ich _nazwy_.
greeting_function( greeting = 'Hello' , name = 'Asia' )
# Wówczas kolejność w wywołaniu nie gra roli - Python wie dokładnie, jaki argument chcemy powiązać z jakim parametrem, gdyż odwołaliśmy się do nich przez nazwy:
greeting_function( name = 'Asia' , greeting = 'Hello' )
# Oczywiście, ciągle _liczba_ argumentów musi się zgadzać:
greeting_function( name = 'Asia' )
# Nie możemy też rzecz jasna użyć nazwy parametru, którego nie ma w definicji funkcji:
greeting_function( name = 'Asia' , sincere_greeting = 'Hello' )
# Zauważmy, jak składnia ta uwypukla fakt, iż powiązanie argumentów z parametrami jest analogiczne do świetnie nam znanego przypisania obiektów do zmiennych.
# Możemy wywoływać funkcję używając obu tych metod jednocześnie, tj. zarówno z argumentami pozycyjnymi, jak i nazwanymi - z tym ograniczeniem, że wszystkie argumenty pozycyjne muszą być _wcześniej_ niż wszystkie argumenty nazwane:
greeting_function( 'Hello' , name = 'Asia' )
# ... ale nie:
greeting_function( greeting = 'Hello' , 'Asia' )
# #### Parametry opcjonalne i domyślne wartości argumentów
# Często dobrą praktyką przy pisaniu funkcji jest powiedzenie użytkownikowi czegoś w stylu: "Dla tego parametru rozsądną wartością jest to czy to." To tzw. **argument domyślny** ("default argument"), zaś parametr taki nazywamy **parametrem opcjonalnym** ("optional parameter").
#
# - parametry opcjonalne w definicji funkcji tworzymy poprzez składnię `parameter = default_argument`;
#
# - wówczas w wywołaniu tej funkcji można takiego argumentu już nie przekazywać (tj. pominąć go) - wówczas Python założy, że chcemy temu parametrowi nadać wartość domyślną;
#
# - można też rzecz jasna wywołać ten parametr z inną wartością, jak zwykle.
#
# Uwaga: Nie pomylmy argumentów domyślnych w definicji funkcji z - dyskutowanym powyżej - wywoływaniem funkcji za pomocą argumentów nazwanych! Składnia wygląda podobnie, `parameter = argument`, lecz różnica jest taka, iż:
#
# - argumenty domyślne podajemy w _definicji_ funkcji (po `def`) jako `parameter = default_argument`;
#
# - argumenty nazwane służą do _wywoływania_ funkcji - i w tym wywołaniu odnosimy się do nich poprzez `parameter = argument`.
#
# Nie wszystkie parametry muszą mieć nadane wartości domyślne - może być to tylko część z nich. Zasada jest tylko taka, że w definicji funkcji wszystkie parametry z wartościami domyślnymi muszą występować _po_ parametrach bez wartości domyślnych.
# Jako przykład zdefiniujmy funkcję, nadając jednemu z jej parametrów wartość domyślną (drugi nie ma wartości domyślnej - musi w definicji funkcji występować _przed_ tym z wartością domyślną):
def polite_greeting_function( name , greeting = 'Most cordially welcome' ):
    # `greeting` is optional (has a default value); the parameter without a
    # default, `name`, must come before it in the definition.
    print( f'{greeting}, {name}!' )
# ... gdzie podkreślmy, że nie możemy w definicji umieścić parametrów opcjonalnych przed tymi wymaganymi:
def polite_greeting_function( greeting = 'Most cordially welcome' , name ):
    # NOTE: deliberately invalid - a parameter with a default value may not
    # precede a required one; running this cell raises a SyntaxError,
    # which is exactly what the lesson demonstrates here.
    print( f'{greeting}, {name}!' )
# Możemy funkcję tę wywołać jak zwykle, podając jej oba argumenty:
polite_greeting_function( 'Asia' , 'Hi' ) # wywołanie za pomocą argumentów pozycyjnych
polite_greeting_function( name = 'Asia' , greeting = 'Hi' ) # wywołanie za pomocą argumentów nazwanych
# Jednakże możemy też każdy z parametrów opcjonalnych _pominąć w wywołaniu_ funkcji - wtedy Python rozumie, że ma on mieć wartość domyślną:
polite_greeting_function( 'Asia' ) # wywołanie za pomocą argumentów pozycyjnych
polite_greeting_function( name = 'Asia' ) # wywołanie za pomocą argumentów nazwanych
# Gdybyśmy oba parametry zdefiniowali jako opcjonalne:
def polite_greeting_function( name = '... ah, what\'s your name?' , greeting = 'Most cordially welcome' ):
    # Both parameters are optional: a call may omit either one, or both.
    print( f'{greeting}, {name}!' )
# ... to możemy rzecz jasna oba pominąć w wywołaniu:
polite_greeting_function()
# ... lub także tylko jeden z nich:
polite_greeting_function( name = 'Kasia' )
polite_greeting_function( greeting = 'Cheers' )
# Uwaga: Powyżej wywoływaliśmy tę funkcję używając składni argumentów nazwanych. Możemy też oczywiście użyć wywołania za pomocą argumentów pozycyjnych:
polite_greeting_function( 'Paulina' , 'Yo' )
# ... i także wtedy możemy parametry opcjonalne pomijać - jednak składnia ma teraz swoje ograniczenia, jako że wywołanie tylko z jednym argumentem odnosi go tylko do pierwszego parametru:
polite_greeting_function( 'Kasia' )
# ... i nie dalibyśmy rady w ten sposób wywołać tej funkcji z parametrem `name` o wartości domyślnej, a parametrem `greeting` o wartości przekazanej w wywołaniu. Innymi słowy, bezpieczniej jest używać wywołania za pomocą argumentów nazwanych i wówczas mamy pełną kontrolę nad tym, któremu parametrowi przekazujemy wartość samodzielnie, a któremu pozostawiamy wartość domyślną.
# #### Funkcje z nieznaną z góry liczbą parametrów wywoływanych pozycyjnie
# Możliwa jest jeszcze następująca "magia" - funkcja przyjmująca dowolną, niezadaną z góry, liczbę argumentów! Robimy to tak: w definicji funkcji, na liście parametrów, umieszczamy parametr poprzedzony operatorem gwiazdki `*` (zob. Lekcja 7); przyjęło się, że parametr ten nazywany jest `args` (choć dowolna nazwa jest dozwolona). Np.:
def my_sum( *args ):
    # All positional arguments arrive packed into the tuple `args`.
    total = sum( args )
    return total
# Teraz:
#
# - wywołując tę funkcję, możemy podać jej dowolną liczbę argumentów "w miejsce" `*args`,
#
# - te argumenty zostaną spakowane do tupli `args`, dostępnej wewnątrz funkcji.
my_sum( 1 , 2 , 3 ) # wywołanie z trzema argumentami
my_sum( 1 , 2 , 3 , 7 , 11 , -9 ) # wywołanie z sześcioma argumentami
# Wszystkie przekazane tak argumenty pakowane są do tupli o nazwie `args` - i teraz z tuplą tą w ciele funkcji możemy robić, co tylko z tuplą można robić! Tu np. obliczyliśmy sumę elementów tupli za pomocą `sum(args)`. Pakowanie to odbywa się poprzez operator gwiazdki (zob. Lekcja 7):
# +
*args , = 1 , 2 , 3
args
# -
# Inny przykład: Zdefiniujmy funkcję, która będzie miała dwa zwykłe parametry wymagane, a po nich dowolną liczbę parametrów wywoływanych pozycyjnie - to jest ogólna zasada, że `*args` musi być w definicji _po_ wszystkich parametrach wymaganych. Funkcja ta niech wydrukuje numer argumentu opcjonalnego i jego wartość - iterując się przez tuplę `args` w dobrze nam znany sposób:
def print_args_with_comments( header , footer , *args ):
    # The two required parameters must appear to the left of *args.
    print( header )
    for position , value in enumerate( args ): # args is a tuple; we iterate over it
        print( f'Argument number {position} is {value}.' )
    print( footer )
print_args_with_comments( 'List of arguments:' , 'That is all, folks!' , 15.3 , 'orange juice' , [ 1 , 2 , 3 ] , 0 )
# Jako kolejny przykład, przypomnijmy sobie funkcję wbudowaną `range`. Może ona przyjmować jeden, dwa lub trzy argumenty:
list( range( 10 ) )
list( range( 5 , 10 ) )
list( range( 5 , 10 , 2 ) )
# Spróbujmy teraz napisać samodzielnie tę funkcję - i to w ogólniejszej postaci, polegającej na tym, że każdy z argumentów może być dowolną liczbą zmiennoprzecinkową, niekoniecznie całkowitą; nasza funkcja będzie też od razu zwracać listę, bez potrzeby konwersji na listę j.w.
#
# Skoro możemy mieć jeden, dwa albo trzy argumenty, użyjmy konstrukcji z operatorem gwiazdki - przekazujemy argumenty funkcji za pomocą `*args`. Teraz `args` w ciele funkcji jest tuplą. Sprawdzamy najpierw, czy jej długość to 1, 2 albo 3 - jeśli nie, zwracamy komunikat z błędem. Jeśli tak, to w zależności od jej długości tworzymy zmienne `start`, `stop` i `step`, do których odpowiednio przypisujemy elementy tupli `args`. Np. jeśli długość tupli `args` jest 1 (czyli podaliśmy jej jeden argument), to `start` ma być równe 0, `stop` ma być równe temu jednemu argumentowi, zaś `step` ma być równe 1. Finalnie, listę wynikową konstruujemy iteracyjnie, pętlą `while`.
def my_range( *args ):
    """Float-friendly re-implementation of range() that returns a list.

    Accepts one (stop), two (start, stop) or three (start, stop, step)
    numeric arguments; any of them may be a float. On misuse (wrong
    argument count, zero step) an error-message string is returned.
    """
    n_args = len( args )
    if n_args not in ( 1 , 2 , 3 ):
        return 'Error! Please pass one, two or three arguments to the function.'
    # Derive start/stop/step from however many arguments were supplied.
    if n_args == 1:
        start , stop , step = 0 , args[ 0 ] , 1
    elif n_args == 2:
        start , stop , step = *args , 1
    else:
        start , stop , step = args
    if step == 0:
        return 'Error! Step should be non-zero.'
    # +1 when counting upwards, -1 when counting downwards.
    step_sign = 1 if step > 0 else -1
    # Walk from start towards stop one step at a time; (stop - current) * step_sign > 0
    # expresses "not yet past stop" for both directions at once.
    result = []
    current = start
    while ( stop - current ) * step_sign > 0:
        result.append( current )
        current += step
    return result
# Example calls:
my_range( 10.3 )
my_range( 5.4 , 10.5 )
my_range( 5.4 , 10.5 , 1.5 )
my_range( 10 , 5 , -1 )
my_range( 10.5 , 5.4 , -1.5 )
my_range( 10.5 , 5.4 , 0 )   # zero step -> error-message string
my_range()                   # too few arguments -> error-message string
my_range( 1 , 2 , 3 , 4 )    # too many arguments -> error-message string
# #### Funkcje z nieznaną z góry liczbą parametrów wywoływanych przez argumenty nazwane
# Analogiczna konstrukcja służy do przekazania funkcji dowolnej, z góry nieznanej, liczby argumentów nazwanych.
#
# - W definicji funkcji, na liście parametrów, umieszczamy argument poprzedzony operatorem dwóch gwiazdek `**` (zob. Lekcja 8). Jego standardowa nazwa to `kwargs` (od "keyword arguments"), choć dowolna nazwa jest dozwolona.
#
# - W ciele funkcji zmienna `kwargs` będzie traktowana jako słownik, którego klucze odpowiadać będą nazwom parametrów, zaś wartości ich przekazanym wartościom.
#
# - W wywołaniu funkcji, w miejsce `kwargs`, podajemy dowolną liczbę argumentów nazwanych - zgodnie z odpowiednią składnią, `parameter = argument`.
# Prosty przykład takiej funkcji, która wydrukuje nazwę argumentu i jego przekazaną wartość:
def print_data( title , **kwargs ):
    """Print a title, an underline of matching length, then each keyword argument as 'name: value'."""
    print( title )
    print( '-' * len( title ) )
    # kwargs is a dict mapping parameter names to the values passed by the caller.
    for field in kwargs:
        print( field + ': ' + str( kwargs[ field ] ) )
print_data( 'Phone directory entry' , name = 'Magda' , phone = 123456 )
print_data( 'Library entry' , book = 'Lord of the Rings' , author = '<NAME>' , year = 1954 , edition = '50th anniversary' , pages = 1184 )
# #### Wszystkie rodzaje parametrów jednocześnie
# Wszystkie te sposoby mogą być stosowane jednocześnie - trzeba jedynie pilnować ich kolejności:
#
# - najpierw znane argumenty pozycyjne/wymagane;
#
# - następnie `*args`;
#
# - następnie znane argumenty z wartościami domyślnymi;
#
# - na końcu `**kwargs`.
# Jako przykład, skomplikujmy trochę powyższą funkcję drukującą informacje - niech wydrukuje nam teraz pewne informacje o wpisie na blogu:
# +
def print_blog_post_data( title = 'Untitled' , *tags , author = 'Anonymous' , **metadata ):
    """Print information about a blog post, demonstrating every parameter kind at once.

    Parameter order follows the rule stated above: a regular parameter with a
    default (title), then *tags collecting extra positional arguments, then a
    keyword parameter with a default (author), and finally **metadata.
    All parameters are optional, so calling with no arguments still works.
    (The original cell declared this function without a body, which is a
    SyntaxError in plain Python - this fills in the described behaviour.)
    """
    print( f'Post: {title} (by {author})' )
    # tags is a tuple of any additional positional arguments.
    for tag in tags:
        print( f'Tag: {tag}' )
    # metadata is a dict of any additional keyword arguments.
    for key , value in metadata.items():
        print( key + ': ' + str( value ) )
# -
# #### Przestrzeń nazw
# Do tej pory wszystkie nasze programy składały się z ciągu linii kodu, wykonywanych jedna po drugiej. Nawet gdy była to instrukcja warunkowa czy pętla, interpreter Pythona szedł przez nie "krok po kroku", w "ciągły" sposób. Obecność definicji funkcji w programie zmienia ten obraz diametralnie - definicja funkcji stanowi osobną tzw. **przestrzeń nazw** ("namespace"), oddzieloną od głównej części programu w "nieciągły" sposób.
#
# Przestrzeń nazw jest to region programu, gdzie "żyją" nazwy zmiennych. Możemy mieć zmienne o tej samej nazwie, ale "żyjące" w odrębnych przestrzeniach nazw, i nie będą one ze sobą interferowały. W szczególności, w definicji funkcji możemy używać zmiennych o tych samych nazwach, co "na zewnątrz" tej definicji, i nie doprowadzi to do zamieszania. Upraszcza to pisanie kodu - pisząc funkcję, nie musisz przejmować się, że jakieś nazwy zmiennych zostały już użyte.
# Mówiąc bardziej szczegółowo, zmienne zdefiniowane "na zewnątrz" jakiejkolwiek funkcji są **globalne** (to tzw. "global scope"). Są one dostępne dla wszystkich, także z ciała funkcji:
# +
x = 5            # global variable, defined outside any function
def func():
    # There is no local x, so the global x is looked up and printed.
    print( x )
func()
# -
# Jeśli zdefiniujesz zmienną o tej samej nazwie **lokalnie**, "wewnątrz" funkcji (to tzw. "local scope"), Python potraktuje je mimo to jako odrębne "etykiety".
# +
x = 5
def func( x ):
    # This x is local to the function; it shadows the global x without changing it.
    x = 10
    print( f'x inside the function equals {x}.' )
func( x )
print( f'x outside the function equals {x}.' )
# -
# Okazuje się, iż możemy dostać dostęp do regionu zewnętrznego z wewnątrz funkcji - służy do tego słowo kluczowe `global`.
# +
x = 5
def func():
    global x    # rebinding x inside the function now affects the global x
    x = 10
    print( f'x inside the function equals {x}.' )
func()
print( f'x outside the function equals {x}.' )
# -
# Czy oznacza to, że nie jesteśmy w stanie zmienić wartości argumentów podanych do funkcji poprzez operacje wewnątrz funkcji? Jesteśmy w stanie - jeśli mamy do czynienia z argumentami _mutowalnymi_, jak listy czy słowniki.
# +
def cross_out_first( lst ):
    # Modifier: mutates the (mutable) list object in place, so the caller sees the change.
    lst[ 0 ] = '---'
    # lst = ['---', 'bar', 'baz', 'qux']   (rebinding lst here would NOT reach the caller)
my_lst = [ 'foo' , 'bar' , 'baz' , 'qux' ]
cross_out_first( my_lst )
my_lst
# -
# ... i zmodyfikowaliśmy argument podany do funkcji! Chodzi o to, że ciało funkcji nie próbuje przypisać do `lst` innego obiektu, ale _zmodyfikować_ (mutowalny!) obiekt, do którego referencja jest funkcji przekazana.
# Jest to dobry przykład modyfikatora. Widać również, jak wiele takie funkcje mogą wprowadzić zamieszania, kiedy przestaniemy kontrolować ich efekty uboczne. Dlatego dobrym wyborem jest pisanie funkcji czystych.
# +
# late binding - at call time
# +
global_var = 'foo'
def my_function():
    # global_var is looked up at call time (late binding), so this prints 'bar'.
    print(global_var)
global_var = 'bar'
my_function()
# -
f_list = []
for i in range(3):
    def f():
        # i is resolved when f is CALLED, not when it is defined (late binding),
        # so every function in f_list returns the final value of i, namely 2.
        return i
    f_list.append( f )
f_list
for f in f_list:
    print( f() )
# +
# list.append as a modifier; list.pop as a modifier returning value
# -
def my_append( lst , item ):
    """Re-implementation of list.append via slice assignment: a modifier that returns None."""
    # Assigning a one-element list to the empty slice at the end inserts item in place.
    lst[ len( lst ): ] = [ item ]
# +
my_lst = [ 1 , 2 , 3 , 4 ]
my_append( my_lst , 5 )
my_lst
# -
def my_pop( lst , item_idx ):
    """Re-implementation of list.pop: a modifier that also returns the removed element."""
    removed = lst[ item_idx ]
    # Replacing the one-element slice with an empty list deletes the element in place.
    lst[ item_idx:( item_idx + 1 ) ] = []
    return removed
my_lst
my_pop( my_lst , 2 )   # removes AND returns the element at index 2
my_lst
# +
# early binding - default values
# -
# Default values (the right-hand i in i=i is a default value for argument name i, which is the left-hand i in i=i) are looked up at def time, not at call time, so essentially they're a way to specifically looking for early binding.
f_list = []
for i in range(3):
    # The default value x = i is evaluated at definition time (early binding),
    # so each f remembers the value i had when that f was defined.
    def f( x = i ):
        return x
    f_list.append( f )
for f in f_list:
    print( f() )
def new( lst = [] ):
    # Deliberate pitfall: the default list is created ONCE, at definition time,
    # and shared between calls - so each no-argument call appends to the same list.
    lst.append( 'I\'m new here!' )
    return lst
new()
new()
new()
# +
# first-class
# -
def create_empty( type_name ):
    """Call the given type object with no arguments, yielding that type's 'empty' value."""
    # Types are first-class objects too, so they can be passed around and called.
    empty_value = type_name()
    return empty_value
for t in [ int , float , str , list , tuple , set , dict ]:
    print( create_empty( t ) )
# +
# decorators
# +
# decorators wrap a function, modifying its behavior
# +
def my_decorator(func):
    # A decorator: takes a function and returns a new function wrapping it.
    def wrapper():
        print("Something is happening before the function is called.")
        func()  # the original, undecorated function runs here
        print("Something is happening after the function is called.")
    return wrapper
def say_whee():
    print("Whee!")
# Manual decoration: rebind the name say_whee to the wrapper returned above.
say_whee = my_decorator(say_whee)
say_whee()
# -
say_whee  # the name now refers to the wrapper closure, not the original function
# +
def my_decorator(func):
    # Same decorator as in the previous cell, repeated so this cell is self-contained.
    def wrapper():
        print("Something is happening before the function is called.")
        func()
        print("Something is happening after the function is called.")
    return wrapper
def say_whee():
    print("Whee!")
say_whee = my_decorator(say_whee)
say_whee()
# +
def do_twice(func):
    """Decorator that runs the wrapped function twice per call, forwarding all arguments."""
    def wrapper_do_twice( *args , **kwargs ):
        # Invoke the wrapped function back to back with identical arguments.
        for _ in range( 2 ):
            func( *args , **kwargs )
    return wrapper_do_twice
@do_twice
def say_whee():
    print("Whee!")
say_whee()
# +
def print_function_use(func):
    # Decorator that logs every call: function name, arguments and return value.
    def wrapper( *args , **kwargs ):
        result = func( *args , **kwargs )
        # Logged after the call so the result can be included in the message.
        print( f'Using... {func.__name__} with {args} and {kwargs}, returning {result}.' )
        return result
    return wrapper
@print_function_use
def say_whee():
    print("Whee!")
say_whee()
# -
@print_function_use
def say_hi( name , greet = 'Hi' ):
    # Prints a greeting; returns None, which the decorator duly reports.
    print( f'{greet}, {name}.' )
say_hi( 'Magda' , greet = 'Hello' )
# +
PLUGINS = {}
def register(func):
    # Registering decorator: stores func in PLUGINS under its name and
    # returns it unchanged - the decorated function is NOT wrapped.
    PLUGINS[func.__name__] = func
    return func
@register
def say_hello(name):
    return f"Hello {name}"
@register
def say_hi(name):
    return f"Hi {name}"
# -
PLUGINS[ 'say_hi' ]('Ania')
# +
from collections import defaultdict
PLUGINS = defaultdict( int )   # missing keys start at 0
def register(func):
    # Variant: counts how many times each function name has been decorated.
    PLUGINS[ func.__name__ ] += 1
    return func
@register
def say_hello(name):
    return f"Hello {name}"
# -
PLUGINS
say_hello('A')   # the function itself is unchanged and still callable
# +
def repeat( n ):
    """Decorator factory: repeat(n) builds a decorator that runs the wrapped function n times."""
    def do_n( func ):
        def wrapper( *args , **kwargs ):
            # Keep calling until the wrapped function has run n times.
            count = 0
            while count < n:
                func( *args , **kwargs )
                count += 1
        return wrapper
    return do_n
@repeat( n = 10 )
def say_whee():
    print("Whee!")
say_whee()
# +
# Stateful Decorators
# +
# we talked about pure functions returning a value based on given arguments. Stateful decorators are quite the opposite, where the return value will depend on the current state, as well as the given arguments.
# -
say_whee.__dict__
# +
def log_function_use(func):
    # Stateful decorator: the wrapper keeps a call counter in its own __dict__.
    def wrapper( *args , **kwargs ):
        result = func( *args , **kwargs )
        wrapper.__dict__[ 'n_calls' ] += 1   # equivalent to: wrapper.n_calls += 1
        print( 'Call ' + str( wrapper.__dict__[ 'n_calls' ] ) )
        return result
    # Initialise the counter as an attribute on the wrapper function object itself.
    wrapper.n_calls = 0
    return wrapper
@log_function_use
def say_whee():
    print("Whee!")
say_whee()
# -
say_whee.__dict__   # the n_calls counter lives on the wrapper function object
say_whee()
say_whee.__dict__   # n_calls has been incremented by the call above
# ## 3. Tematy zaawansowane
# ### 3.1. Rekursja
# <img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/recursion.png' width = '300px'>Seymour Papert, współtwórca języka programowania [Logo](https://en.wikipedia.org/wiki/Logo_(programming_language)), znanego wielu z nas ze szkoły podstawowej (grafika żółwia), stwierdził:
#
# > "Of all ideas I have introduced to children, recursion stands out as the one idea that is particularly able to evoke an excited response."
#
# **Rekursja** ("recursion") to bardzo [ogólna idea](https://en.wikipedia.org/wiki/Recursion), polegająca na tym, że **funkcja w swoim ciele odwołuje się do siebie samej**. Pierwszą myślą jest, że może spowodować to nieskończoną lawinę odwołań: funkcja woła samą siebie, ta znów woła samą siebie... i tak dalej, w nieskończoność ("infinite regress"). Ten pozorny paradoks jest źródłem tzw. "humoru rekursji". W niektórych książkach o programowaniu, indeks pojęć zawiera żartobliwy wpis:
#
# > Recursion, _see Recursion_.
#
# Kiedy w wyszukiwarkę Google wpiszesz hasło "recursion", żartobliwie podpowiada ona:
#
# > Did you mean: _recursion_
#
# Pewna osoba zapytana na rozmowie rekrutacyjnej o zdefiniowanie rekursji, wzięła kartkę papieru i napisała po jej obu stronach:
#
# > Please turn over.
#
# Innym przykładem są tzw. ["rekursywne akronimy"](https://en.wikipedia.org/wiki/Recursive_acronym), np. nazwa popularnego języka programowania PHP, pierwotnie oznaczająca "Personal Home Page", teraz tłumaczona jest jako "PHP: Hypertext Preprocessor".
#
# <img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/print_gallery.jfif' width = '300px'>Jest wiele innych kontekstów rekursji, np. w geometrii (fraktale - ich coraz mniejsze części wyglądają jak całość), czy sztuce (np. litografia Eschera "Print Gallery", technika ["Mise en abyme"](https://en.wikipedia.org/wiki/Mise_en_abyme)).
#
# W praktyce unikamy nieskończonego regresu poprzez umieszczenie odpowiedniego warunku brzegowego. Musimy mieć zatem dwa komponenty:
#
# - Prosty, podstawowy, **warunek brzegowy** - krańcowy scenariusz, który _nie_ używa rekursji w swojej definicji.
#
# - **Krok rekursywny** - odwołanie funkcji do samej siebie, ale w taki sposób, który przybliża nas coraz bardziej do warunku brzegowego.
#
# Klasycznym przykładem definicji rekursywnej jest dobrze nam znany ciąg Fibonacciego, dla którego:
#
# - warunkiem brzegowym jest $F_0 = 0$, $F_1 = 1$;
#
# - krokiem rekursywnym jest wołanie samego siebie, $F_n = F_{n - 1} + F_{n - 2}$, dla $n \geq 2$.
#
# Widzimy tu zatem, iż chociaż mamy to odwołanie do samego siebie, to krok rekursywny prowadzi coraz bliżej warunku brzegowego, gdzie rekursja się zatrzymuje.
# W informatyce słowo "rekursja" ma zwykle nieco bardziej ograniczone znaczenie i dotyczy _funkcji_, które w swoim ciele wołają same siebie. Jako jeszcze prostszy niż ciąg Fibonacciego przykład rekursji rozważmy obliczenie sumy liczb od 0 do `n`. Możemy oczywiście zrobić to funkcją wbudowaną:
sum( range( 101 ) )
# ... ale spróbujmy sami zdefiniować tę sumę rekurencyjnie. Jeśli zdefiniujemy $S_n = 0 + 1 + 2 + \ldots + n$, to widzimy, iż:
#
# - krok rekurencyjny to $S_n = n + S_{n - 1}$,
#
# - warunek brzegowy to $S_0 = 0$.
#
# Zakodujmy to poznaną składnią `def`:
def sum_recursion( n ):
    """Recursively compute 0 + 1 + ... + n.

    Base case: S(0) = 0. Recursive step: S(n) = n + S(n - 1),
    each call moving one step closer to the base case.
    """
    if n == 0:   # base case - stops the recursion
        return 0
    return n + sum_recursion( n - 1 )
sum_recursion( 100 )
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 2: Silnia.
#
# Innym klasycznym przykładem definicji rekursywnej jest silnia, $n! = 1 \cdot 2 \cdot \ldots \cdot n$, czyli iloczyn kolejnych liczb naturalnych. Możemy tę definicję wyrazić jako:
#
# - krok rekursywny: $n! = n \cdot (n - 1)!$;
#
# - warunek brzegowy: $1! = 1$.
#
# Napisz rekursywną definicję tej funkcji (nazwijmy ją `factorial_recursion`).
# +
# szybkie ćwiczenie 2 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 3: Funkcja Ackermanna.
#
# Bardzo ciekawym przykładem funkcji zdefiniowanej rekursywnie jest [funkcja Ackermanna](https://en.wikipedia.org/wiki/Ackermann_function). (Jest to najprostszy znany przykład funkcji, która nie jest "pierwotnie rekursywna", co z grubsza oznacza, iż nie da jej się zapisać wyłącznie za pomocą pętli `for`.)
# \begin{equation}
# A( m , n ) =
# \begin{cases}
# n + 1 & \text{jeśli $m = 0$}\\
# A( m - 1 , 1 ) & \text{jeśli $m > 0$ i $n = 0$}\\
# A( m - 1 , A( m , n - 1 ) ) & \text{jeśli $m > 0$ i $n > 0$}
# \end{cases}
# \end{equation}
#
# Napisz rekursywną definicję tej funkcji (nazwijmy ją `ackermann`). Jej wartości rosną w niezwykle szybkim tempie, podobnie jak liczba wywołań potrzebnych do dojścia do warunku brzegowego, więc nie próbuj wywoływać już nawet `ackermann(4, 1)`. Natomiast `ackermann(3, 1)` jest równe 13.
# +
# szybkie ćwiczenie 3 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 4: Największy wspólny dzielnik.
#
# Obliczenie największego wspólnego dzielnika (GCD = "greatest common divisor") dwóch liczb naturalnych `m` i `n` (czyli największej takiej liczby, przez którą `m` i `n` obie dzielą się bez reszty) można dokonać tzw. [algorytmem Euklidesa](https://en.wikipedia.org/wiki/Euclidean_algorithm):
# \begin{equation}
# GCD( m , n ) =
# \begin{cases}
# m & \text{jeśli $n = 0$}\\
# GCD( n , m \% n ) & \text{jeśli $n > 0$}
# \end{cases}
# \end{equation}
# ... gdzie `m % n` to reszta z dzielenia `m` przez `n`. Zdefiniuj tę funkcję `GCD` rekursywnie.
#
# Sprawdź swoją odpowiedź: GCD liczb 1386 i 3213 wynosi 63.
# +
# szybkie ćwiczenie 4 - rozwiązanie
# -
# Na koniec pewna uwaga praktyczna: Zdefiniujmy najpierw rekursywnie ciąg Fibonacciego, ale dodajmy do niego instrukcję `print`, sprawdzającą, ile razy funkcja wywołała samą siebie:
def fibonacci_recursion( n ):
    """Naive recursive Fibonacci; prints every call to expose the repeated work."""
    print( f'Calling F({n}).' )
    # Base cases F(0) = 0 and F(1) = 1 collapse into returning n itself.
    if n in ( 0 , 1 ):
        return n
    return fibonacci_recursion( n - 1 ) + fibonacci_recursion( n - 2 )
fibonacci_recursion( 5 )
# Problem z tą implementacją jest taki, że niepotrzebnie wielokrotnie obliczamy te same elementy ciągu. Rozwiązaniem jest przechowywanie raz obliczonych elementów ciągu w pamięci tymczasowej ("cache"), a następnie jedynie odwoływanie się do nich w razie potrzeby. Można dokonać tego następującym kodem - którego na razie nie tłumaczmy! `lru_cache` to tzw. **dekorator**.
# +
from functools import lru_cache
@lru_cache( maxsize = None )   # memoise: each distinct n is computed (and printed) only once
def fibonacci_recursion( n ):
    print( f'Calling F({n}).' )
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        # Repeated subproblems are served from the cache instead of recursing again.
        return fibonacci_recursion( n - 1 ) + fibonacci_recursion( n - 2 )
fibonacci_recursion( 5 )
# -
# ### 3.2. Funkcje anonimowe (`lambda`)
# Mamy już sporo doświadczenia z definiowaniem funkcji za pomocą słowa kluczowego `def`. Konstrukcja ta ma nieograniczone możliwości - dowolne zachowanie jesteśmy w stanie wyabstrahować w postaci funkcji zdefiniowanej przez `def`. W tej sekcji zajemiemy się drugą metodą definicji funkcji, tzw. **funkcjach anonimowych**, znanych też jako funkcje `lambda`. (Wspominaliśmy już o nich w materiałach dodatkowych do Lekcji 6, na temat technik `reduce`, `map`, `filter`. Spotkaliśmy je też przy okazji omawiania typu `defaultdict` w Lekcji 8.)
#
# Najpierw składnia: są one definiowane przy użyciu słowa kluczowego `lambda`, a nie `def`, w postaci:
# ```
# lambda arguments : expression
# ```
#
# Są to generalnie takie same "maszynki", które przyjmują argumenty `arguments` i zwracają wyrażenie `expression` - z trzema ogólnymi różnicami wobec "pełnoprawnych funkcji":
#
# - powinny być "krótkie" - rezultat jest ograniczony do jednego wyrażenia `expression`, a więc operacja, którą definiują, musi dać się "krótko" zapisać (choć jak widzieliśmy, nawet dość skomplikowane operacje da się tak zapisać, choćby przy użyciu "list comprehension");
#
# - są **anonimowe** ("anonymous") - nie mają nazwy (można je przypisać do zmiennej, ale to nie jest nazwa funkcji!);
#
# - z formalnego punktu widzenia, `lambda` to wyrażenie ("expression"), a nie stwierdzenie ("statement") jak `def` - oznacza to, iż można je umieszczać w miejscach niedozwolonych dla stwierdzeń, np. jako argument przy wywoływaniu innej funkcji, element listy itd.
#
# Funkcje `lambda` są bardzo użyteczne w różnych kontekstach, gdzie liczy się szybka, zwięzła operacja funkcyjna.
# Np. prosta funkcja matematyczna z początku tej lekcji w formie `lambda`, przypisana do zmiennej o nazwie `fun`:
# +
fun = lambda x : 2 * x + 5   # anonymous function assigned to the variable fun
fun( 7 )
# -
# Funkcje `lambda` powinny być "krótkie", natomiast jedno wyrażenie może robić więcej niż się wydaje! Z drugiej strony, jeśli chcemy tworzyć naprawdę złożone funkcje, zwykle dobrym pomysłem jest jednak użycie `def` - funkcje `lambda` zaprojektowane są z myślą o krótkich fragmentach kodu wykonywanego w tej samej linijce, w której są wstawione. Tu np. funkcja `lambda` z dwoma argumentami, listą liczb `amounts` i stringiem `currency`, zwracająca listę, gdzie każda z liczb zmieniona jest na string wypisujący tę liczbę, z dodanym po spacji słówkiem `currency`:
# +
# A lambda can still do a lot in one expression - here a list comprehension.
curr = lambda amounts , currency : [ str( amount ) + ' ' + currency for amount in amounts ]
curr( [ 1.5e7 , 2.3e8 , 7.8e6 , -3.9e7 ] , 'USD' )
# -
# Inny przykład:
# +
is_even = lambda n : n % 2 == 0   # predicate: True when n is divisible by 2
is_even( 8 )
# -
# Funkcja `lambda` może też nie przyjmować żadnych argumentów, np.:
# +
always = lambda : 'Always there!'   # a lambda may take no arguments at all
always()
# -
# ... a funkcje takie są przydatne np. przy tworzeniu słowników `defaultdict`.
# #### Przykład: Konstrukcja "switch"
# Funkcje `lambda` można umieszczać w miejscach niedozwolonych dla stwierdzeń `def`, np. jako elementy list czy słowników. Wyobraźmy sobie np. "tabelę akcji" (tzw. **"switch"**), czyli słownik, w którym klucze to różne przypadki, a wartości to proste funkcje ("akcje"), wykonywane w danym przypadku:
# +
# A "switch": a dictionary mapping case labels to small functions (actions).
action = {
    'case A' : lambda x : x + 5 ,
    'case B' : lambda x : 2 * x - 3 ,
    'case C' : lambda x : x ** 2 + 1 ,
    'case D' : lambda x : abs( x ) - 7
}
action[ 'case C' ]( 3.64 ) # action[ 'case C' ] is a function, called here with the argument 3.64
# -
# W ten sposób możemy zdefiniować paletę prostych zachowań zależnych np. od decyzji użytkownika, np.:
# +
from math import pi
# Dispatch table of cylinder formulas, selected by the user's choice below.
cylinder = {
    'total volume' : lambda r , h : pi * r ** 2 * h ,
    'total surface' : lambda r , h : 2 * pi * r * h + 2 * pi * r ** 2 ,
    'side surface' : lambda r , h : 2 * pi * r * h
}
r = float( input( 'Enter cylinder\'s radius:' ) )
h = float( input( 'Enter cylinder\'s height:' ) )
what = input( 'What would you like to calculate (total volume, total surface, side surface):' )
print( f'{what.capitalize()} of your cylinder is {cylinder[ what ]( r , h )}.' )
# -
# #### Przykład: Sortowanie według klucza
# Inne przydatne zastosowanie funkcji `lambda` ma związek z **sortowaniem**. Rozważmy kolekcję obiektów, dla których zdefiniowane jest pojęcie "porządku" (mniejszy/większy), np. liczb czy stringów (porządek leksykograficzny). Możemy je posortować funkcją wbudowaną `sorted`:
sorted( [ 5 , 2 , 4 , 1 , 3 ] )
sorted( [ 'ABCdef' , 'abcde' , 'aBcd' , 'Abc' ] ) # upper-case letters sort before lower-case ones
sorted( 'unununium' )
sorted( { 2 : 'a' , 1 : 'b' } ) # sorts the dictionary's keys
# Funkcja `sorted` nie modyfikuje oryginalnej kolekcji, lecz zwraca nową kolekcję, z posortowanymi elementami. Typ `list` ma także analogiczną metodę `sort`, która sortuje listę "w miejscu", np.:
# +
lst = [ 3 , 2 , 1 ]
lst.sort()   # in-place sort: mutates lst and returns None
lst
# -
# ... ale skupmy się jednak na funkcji `sorted`; jest ona choćby ogólniejsza od metody `sort`, jako że ta jest metodą stricte list, natomiast funkcja `sorted` działa na ogólniejszych kolekcjach.
# Funkcja `sorted` ma opcjonalny argument nazwany `reverse`, z wartością domyślną `False`; kiedy ustawi się ją na `True`, sortowanie przebiega od największego do najmniejszego:
sorted( [ 5 , 2 , 4 , 1 , 3 ] , reverse = True )
# Ma ona też drugi opcjonalny argument nazwany, `key`, na którym to właśnie się skupmy. Argument ten powinien być funkcją o jednym argumencie, która zwraca "klucz", po którym będziemy sortować. Klasycznym przykładem jest sortowanie listy tupli, gdzie możemy zechcieć np. sortować po drugim elemencie:
# +
kids = [ ( 'Ania' , 15 ) , ( 'Basia' , 11 ) , ( 'Ela' , 13 ) ]
sorted( kids , key = lambda x : x[ 1 ] )  # sort the tuples by their second element (the age)
# -
# Funkcja przekazana przez argument `key` robi co następuje:
#
# - Jest ona aplikowana do każdego elementu kolekcji. Tutaj mamy listę tupli 2-elementowych `kids`. Funkcja podana w argumencie `key` jest aplikowana do wszystkich tych tupli po kolei. Jak widzimy, dla każdej takiej tupli zwraca jej drugi element. Zatem efektem zaaplikowania tej funkcji do wszystkich elementów będzie lista `[15, 11, 13]`.
#
# - Dopiero tak otrzymaną listę sortujemy. Tutaj dostajemy `[11, 13, 15]`, co przekłada się na porządek oryginalnej listy `
# [('Basia', 11), ('Ela', 13), ('Ania', 15)]`.
# Często w tym miejscu stosuje się funkcję `lambda` j.w. Nie musi tak jednak być. Oto przykład, gdzie sortujemy listę stringów, ale nie biorąc pod uwagę wielkości liter (jak pamiętamy, wielkie litery są "mniejsze" od małych - tu chcemy tę własność zaniedbać):
sorted( [ 'ABCdef' , 'abcde' , 'aBcd' , 'Abc' ] , key = str.lower )
# Argumentem `key` jest tu funkcja z jednym argumentem, `str.lower`, zamieniająca wielkie litery na małe (`lower` to metoda stringów, ale pamiętamy, iż metody to też funkcje, tylko trzeba je odpowiednio nazwać: nazwa typu, kropka, nazwa metody). Ona zatem najpierw konwertuje naszą listę na `[ 'abcdef' , 'abcde' , 'abcd' , 'abc' ]` i to ją sortuje, `
# ['abc', 'abcd', 'abcde', 'abcdef']`, co przekłada się na powyższą postać listy oryginalnej.
# Inny przykład: Rozważmy listę:
list( range( -5 , 6 ) )
# ... and sort it using the absolute value as the key:
sorted( range( -5 , 6 ) , key = abs )
# Co tu się stało? Funkcja z jednym argumentem `abs` zaaplikowana do każdego elementu oryginalnej listy dała listę `[5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5]`. To tę listę sortujemy, co przekłada się na powyższy wynik.
#
# Pojawia się przy okazji ciekawe spostrzeżenie: w tej przekonwertowanej liście są elementy o _tej samej wartości_. Jak zatem są one sortowane? Odpowiedź na to pytanie pojawia się w dokumentacji Pythona: sortowania mają zagwarantowaną **stabilność**. [Stabilność algorytmu sortującego](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) oznacza, iż elementy o tym samym kluczu pozostawione są w oryginalnej kolejności. Zatem np. skoro elementy -1 i 1 oryginalnej listy mają ten sam klucz (tj. tę samą wartość 1 po przekonwertowaniu funkcją `abs`), to pozostawione zostają w oryginalnej kolejności, czyli -1 przed 1.
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 5: Jak zmodyfikować powyższy przykład, aby po posortowaniu dostać listę `[0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5]`, gdzie -1 jest _po_ 1 itd.?
# Wskazówka: Wykorzystaj stabilność sortowania i odpowiednio zmodyfikuj oryginalną listę.
# +
# szybkie ćwiczenie 5 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 6: Masz daną listę `numerals`, jak niżej.
#
# (a) Posortuj ją według długości słów, od najdłuższego do najkrótszego.
#
# (b) Posortuj ją według ostatniej litery (w normalnym porządku leksykograficznym).
#
# (c) Posortuj ją według liczby wystąpień litery `'e'` w nich.
numerals = [ 'zero' , 'one' , 'two' , 'three' , 'four' , 'five' , 'six' , 'seven' , 'eight' , 'nine' ]
# +
# szybkie ćwiczenie 6a - rozwiązanie
# +
# szybkie ćwiczenie 6b - rozwiązanie
# +
# szybkie ćwiczenie 6c - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Szybkie ćwiczenie 7: Masz daną listę `origami_masters` imion i nazwisk; masz także osobny słownik `birthdays`, którego kluczami są te imiona, a wartościami stringi z datami urodzin.
#
# (a) Posortuj listę `origami_masters` według nazwisk.
#
# (b) Posortuj listę `origami_masters` według dat urodzenia, zapisanych w słowniku `birthdays`.
# +
# NOTE(review): the '<NAME>' placeholders look like anonymisation residue from data
# extraction; the original names cannot be reconstructed reliably, so they stay as-is.
origami_masters = [
    '<NAME>' ,
    '<NAME>' ,
    '<NAME>' ,
    '<NAME>' ,
    '<NAME>' ,
    '<NAME>' ,
    'Sipho Mabona'
]
# Birth dates keyed by the names above, as ISO-format strings.
birthdays = {
    '<NAME>' : '1955-11-26' ,
    '<NAME>' : '1981-06-06' ,
    '<NAME>' : '1911-03-14' ,
    '<NAME>' : '1956-11-15' ,
    '<NAME>' : '1961-05-04' ,
    '<NAME>' : '1957-01-08' ,
    'Sipho Mabona' : '1980-01-11'
}
# +
# szybkie ćwiczenie 7a - rozwiązanie
# +
# szybkie ćwiczenie 7b - rozwiązanie
# -
# ### 3.3. Typowanie
# #### "Duck typing"
# Jedną z pięknych i bardzo użytecznych cech Pythona jest tzw. **"duck typing"**. Oznacza to ogólnie, że dana operacja (funkcja, metoda, operator...) może przyjąć jako argument obiekt _dowolnego typu_, jeśli tylko jest ona dla tego typu zdefiniowana. Innymi słowy, ta sama operacja może być zdefiniowana dla obiektów różnych typów, dla każdego typu dając inny efekt - specyficzny dla tego typu.
#
# Klasycznym przykładem w Pythonie jest operator `+`, który dla liczb (typy `int` i `float`) oznacza dodawanie arytmetyczne:
9 + 16
# Ten sam operator `+` zastosowany jednak do dwóch list ma zupełnie inne znaczenie - łączy te listy ze sobą:
[ 'apple' , 'orange' , 'pear' ] + [ 'potato' , 'cabbage' ]
# ... a zastosowany do stringów, łączy je w jeden string:
'Jan' + ' ' + 'Kowalski'
# Typ danych obiektu jest zatem mniej istotny od zdefiniowanych dla niego operacji. Skoro `+` jest zdefiniowany dla typów `int`, `float`, `list`, `str`, to pisząc wyrażenie z tym operatorem nie musimy sprawdzać typów - wystarczy wiedzieć, że operacja jest dla tych typów zdefiniowana.
#
# Nazwa "duck typing" pochodzi od tzw. ["testu kaczki"](https://en.wikipedia.org/wiki/Duck_test) i powiedzenia:
#
# > "If it walks like a duck, and it quacks like a duck, then it must be a duck."
#
# Nie powinniśmy martwić się zatem, czy obiekt "jest kaczką" (czy jest liczbą, listą, stringiem...), a jedynie o to, czy "kwacze" (czy umie stosować `+`). Cokolwiek, co "kwacze" się nadaje, nawet jeśli nie jest "kaczką".
#
# Logika tu jest taka, iż wspomniane obiekty - liczby, listy, stringi - "wiedzą", co to jest `+`; w definicji tych obiektów zawarta została definicja operatora `+`, opisująca, co on na obiektach tych robi. "Duck typing" działa automatycznie, jeśli tylko użyte obiekty "wiedzą" o istnieniu danej operacji.
8 + [ 2 ] # operator + nie jest zdefiniowany między liczbą a listą!
# Innym klasycznym przykładem jest funkcja wbudowana `len`, którą można wywołać np. na stringach, listach, tuplach, zbiorach, słownikach.
# To samo oczywiście stosuje się do naszych własnych funkcji. Zdefiniujmy np. funkcję, która przyjmuje dwa argumenty - w zamyśle listy - i zwraca ich elementy wspólne:
def intersect( seq_1 , seq_2 ):
    """Return the elements of seq_1 that also occur in seq_2 (seq_1's order, duplicates kept)."""
    # Works on any types that support `for` and `in` - duck typing in action.
    common = []
    for element in seq_1:
        if element in seq_2:
            common.append( element )
    return common
# Działa ona oczywiście na listach:
intersect( [ 5 , 8 , -3 , 11 , 9 ] , [ 9 , 4 , 5 ] )
# ... ale po przyjrzeniu się jej treści widzimy, że równie dobrze możemy zastosować ją np. do dwóch stringów, ponieważ stringi "wiedzą", co to pętla `for` i co to operator `in`:
intersect( 'monkey' , 'donkey' )
# Jeden argument może być listą, drugi stringiem, nie ma problemu:
# +
digraphs = [ 'ch' , 'cz' , 'dz' , 'dź' , 'dż' , 'rz' , 'sz' ]
text = 'w szczebrzeszynie chrząszcz brzmi w trzcinie'
intersect( digraphs , text )   # a list and a string mix fine thanks to duck typing
# -
# #### Adnotacje
# "Duck typing" jest niezwykle wygodny, lecz może być podatny na błędy, jeśli zechcemy zastosować daną operację na typie danych, który o niej "nie wie". Pewną pomocą w tej kwestii są tzw. **adnotacje** ("annotations", "type hints"), które umieszczamy w definicji funkcji w następujący sposób:
def intersect( seq_1 : list , seq_2 : list ) -> list:
    # The annotations suggest (but do not enforce) list arguments and a list result.
    return [ item for item in seq_1 if item in seq_2 ]
# Adnotacje możemy dodać po argumentach funkcji, po dwukropku `:`, i oznaczają one wtedy, że _sugerowanym_ typem argumentu jest ten podany po nim. Podobnie możemy zasugerować typ obiektu zwracanego przez funkcję, po symbolu `->` w nagłówku. Są to jednak cały czas tylko sugestie, w żaden sposób nie wymuszane w czasie wykonywania programu; możemy np. nadal wykonać:
intersect( digraphs , text )
# Można sprawić, aby Python statycznie sprawdzał adnotacje i wyrzucał błąd jeśli otrzymane argumenty nie będą takiego typu, jak zasugerowany - służy do tego [zewnętrzna biblioteka `mypy`](http://mypy-lang.org/). Ale nawet i bez takiego rygoru adnotacje mogą pomóc utrzymać porządek w kodzie - choć na pewno ich pisanie dodaje pracy; są one [szczególnie pomocne](https://www.bernat.tech/the-state-of-type-hints-in-python/) w większych projektach, nad którymi pracuje więcej osób.
# Dodajmy jeszcze, że adnotacje danej funkcji możemy sprawdzić przez jej atrybut `__annotations__`:
intersect.__annotations__
# Temat sprawdzania typów w Pythonie jest [dość złożony](https://realpython.com/python-type-checking/) i bardzo techniczny, poza ramami tego kursu.
# #### "Docstrings"
# Rozpowszechnionym sposobem na poprawę czytelności programów są tzw. **"docstrings"**, czyli opisy działania funkcji skonstruowane wedle [ściśle określonych zasad](https://www.python.org/dev/peps/pep-0257/) (choć są różne warianty, m.in. "Sphinx Style", "Google Style", "Numpy Style"). Podstawowe zasady to:
#
# - są to wielolinijkowe stringi, zamknięte w potrójnych cudzysłowach `"`;
#
# - "docstring" zaczyna się od razu po nagłówku funkcji;
#
# - jego pierwsza linijka stanowi krótkie podsumowanie działania funkcji, zaś po nim mamy linijkę przerwy;
#
# - następnie mamy dłuższy opis działania funkcji;
#
# - na końcu opisujemy pokrótce każdy z argumentów i rezultat zwracany przez funkcję;
#
# - ostatnia linijka "docstring" jest pusta.
def intersect( seq_1 , seq_2 ):
    """
    Intersection of two lists.

    Iterates through the first list and retains only the elements that are
    also present in the second list. Order follows seq_1; duplicates are kept.

    Parameters:
        seq_1 (list): the first list
        seq_2 (list): the second list

    Returns:
        list: A list containing the common elements of seq_1 and seq_2.

    """
    return [ item for item in seq_1 if item in seq_2 ]
# "Docstring" danej funkcji dostępny jest przez atrybut `__doc__`:
print( intersect.__doc__ )
# ### 3.4. Funkcje to obiekty pierwszej klasy
# Widzieliśmy już na różnych przykładach, że z funkcjami w Pythonie można "wszystko robić", tj.:
#
# - można przypisywać funkcję do zmiennej;
#
# - funkcje można przechowywać w różnych strukturach danych, jak listy, słowniki itd.;
#
# - funkcja może być podana jako argument do innej funkcji;
#
# - funkcja może zwracać inną funkcję.
#
# Obiekty, z którymi można robić te fundamentalne czynności, nazywane są **obiektami pierwszej klasy** ("first-class objects") - przykłady to liczby, stringi, listy - a więc funkcje to w Pythonie również obiekty pierwszej klasy.
# #### Przypisywanie funkcji do zmiennej
# Zdefiniujmy prostą funkcję jako przykład:
def shout( text ):
    """Return *text* converted entirely to upper case."""
    result = text.upper()
    return result
# Przypisujemy ją do zmiennej `yell`:
yell = shout  # 'yell' now refers to the very same function object as 'shout'
# ... i teraz zmienna ta wskazuje na funkcję `shout`:
yell( 'hello!' )
# Nazwę funkcji możemy w Pythonie otrzymać atrybutem `__name__`:
shout.__name__
# ... i mamy:
yell.__name__  # still 'shout': the variable name and the function's own name differ
# Zmienna wskazująca na funkcję i sama ta funkcja to dwie odrębne rzeczy.
# Spotkaliśmy się też już z przypisywaniem funkcji `lambda` do zmiennej.
# #### Przechowywanie funkcji w strukturach danych
# Funkcje mogą być np. elementami kolekcji. Widzieliśmy już wcześniej słownik funkcji przy okazji konstrukcji "switch". Tutaj inny przykład, z listą funkcji:
# +
# A list whose elements are functions -- functions are ordinary objects:
format_options = [
    yell ,
    str.lower ,
    str.capitalize
]
# Call each stored function on the same sample text:
for format_option in format_options:
    print( format_option( 'Hello World!' ) )
# -
# #### Przekazywanie funkcji jako argument innej funkcji
# Napiszmy funkcję `greet`, która wydrukuje tekst powitania, ale przekażmy jej jako argument inną funkcję, która zdefiniuje nam sposób formatowania tego powitania:
def greet( format_option ):
    """Build a greeting by applying *format_option* to 'Hello World!'."""
    greeting = format_option( 'Hello World!' )
    return greeting
# Teraz wywołajmy funkcję `greet` z różnymi wartościami argumentu, tj. różnymi funkcjami:
greet( yell )  # behaviour passed in as an argument: upper-case greeting
greet( str.lower )  # lower-case greeting
greet( lambda text : text[ ::2 ] )  # every second character of the greeting
# ... itd. Jest to dość niesamowite: funkcji możemy jako argument przekazywać "zachowanie", w formie funkcji opisującej to "zachowanie". Podobny przykład widzieliśmy przy okazji argumentu `key` funkcji wbudowanej `sorted`.
# #### Funkcja zwracająca funkcję
# Wspomnieliśmy już wcześniej, że funkcję możemy zdefiniować wewnątrz definicji innej funkcji. Co więcej, możemy taką funkcję zwrócić jako rezultat - innymi słowy, funkcje mogą zwracać "zachowanie".
def speak( how ):
    """Return a function implementing the requested way of speaking.

    'scream' gives upper case with '!!!' appended, 'murmur' gives
    lower case wrapped in '...', and any other value returns the
    text unchanged.
    """
    def scream( text ):
        return text.upper() + '!!!'
    def murmur( text ):
        return '...' + text.lower() + '...'
    def just_say_it( text ):
        return text
    # Dispatch table instead of an if/elif chain; unknown styles
    # fall back to returning the text as-is.
    styles = { 'scream' : scream , 'murmur' : murmur }
    return styles.get( how , just_say_it )
# Teraz np.:
speak( 'scream' )  # the function 'scream' itself is returned -- not called yet
# ... jest funkcją, którą możemy wywołać na tekście:
speak( 'scream' )( 'Hello world' )  # the returned function, applied immediately
# Zastosowaniem tej możliwości jest tzw. "fabryka funkcji", np.:
def generate_power( n ):
    """Function factory: produce a function computing the n-th power.

    Parameters:
        n: the fixed exponent, captured by the returned closure.

    Returns:
        A one-argument function mapping x to x ** n.
    """
    def nth_power( x ):
        # n is read from the enclosing scope (a closure)
        return pow( x , n )
    return nth_power
# +
pow_3 = generate_power( 3 )  # pow_3 is the cubing function produced by the factory
pow_3
# -
pow_3( 2 )  # 2 ** 3 == 8
# Funkcje przyjmujące jako argumenty inne funkcje, bądź zwracające inne funkcje, nazywane są **funkcjami wyższego rzędu** ("higher-order functions").
# ## 4. Zadania domowe
# ### 4.1. Dłuższe ćwiczenia
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/heron.png' width = '250px'> Dłuższe ćwiczenie 8: Wzór Herona.
#
# [Wzór Herona](https://pl.wikipedia.org/wiki/Wz%C3%B3r_Herona) pozwala obliczyć pole trójkąta na podstawie długości jego boków, `a`, `b` i `c`:
#
# $S = \sqrt{p(p - a)(p - b)(p - c)} , \quad p = (a + b + c)/2$
#
# Napisz funkcję implementującą tę formułę. Uwzględnij sytuację, kiedy wyrażenie pod pierwiastkiem kwadratowym jest ujemne - wówczas odcinki `a`, `b`, `c` mają długości niepozwalające na utworzenie trójkąta; w tym wypadku wydrukuj odpowiedni komunikat, np. "It's not a triangle!". Odnajdź i zaimportuj funkcję pierwiastka kwadratowego z modułu `math`.
#
# Sprawdź swoją odpowiedź: Pole trójkąta o bokach 3, 4, 5 wynosi 6.
# +
# dłuższe ćwiczenie 8 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/riemann_sum.gif' width = '350px'> Dłuższe ćwiczenie 9 (\*): Całka określona.
#
# Napisz funkcję (o nazwie `integrate`), przyjmującą cztery argumenty: pierwszym argumentem jest _inna funkcja_ `f` (ona będzie miała jeden argument liczbowy), kolejne dwa to liczby zmienno-przecinkowe `a`, `b`, a ostatnim dodatnia liczba całkowita `n`.
#
# Funkcja ta dokona przybliżonego obliczenia tzw. całki określonej funkcji $f$ między punktami $a$ i $b$, tj. $\int_{a}^{b} f(x)dx$. Pojęcie całki jest co prawda dość zaawansowane, ale sama jej definicja jest bardzo prosta! Jest to pole powierzchni pod wykresem funkcji $f$, a między pionowymi liniami postawionymi w punktach $a$ i $b$.
#
# Przybliżone jej obliczenie polega na narysowaniu serii $n$ wąskich prostokątów i zsumowanie ich pól powierzchni (jest to tzw. "suma Riemanna"). Podstawa każdego prostokąta ma długość $d/n$, gdzie zdefiniujmy $d = b - a$, gdyż odcinek między $a$ i $b$ dzielimy na $n$ równych części. Podział ten jest dokonany w następujących punktach:
# $$
# x_0 = a, \quad x_1 = a + \frac{d}{n}, \quad x_2 = a + \frac{2d}{n}, \quad \ldots \quad x_n = a + n \cdot \frac{d}{n} = b
# $$
# Wysokość natomiast kolejnych prostokątów to $f(x_0)$, $f(x_1)$, ..., $f(x_{n - 1})$. Skoro tak, to pole powierzchni każdego prostokąta to kolejno $f(x_0) \cdot d/n$, $f(x_1) \cdot d/n$, ..., $f(x_{n - 1}) \cdot d/n$. Zatem przybliżona wartość całki określonej to suma tych pól powierzchni:
# $$
# \frac{d}{n} \left( f( x_0 ) + f( x_1 ) + \ldots + f( x_{n - 1} ) \right)
# $$
# Oczywiście, im liczba $n$ większa, tym dokładniejsze jest przybliżenie. Możesz używać wartości np. 10 000.
#
# Napisz tę funkcję najpierw krok po kroku, jak to tu opisano, definiując kolejno zmienne `d`, `x_list` (lista punktów $x_0$, ..., $x_{n - 1}$), `f_list` (listę wartości funkcji $f$ w tych punktach) i wreszcie wynik `riemann_sum`. Następnie spróbuj napisać ją w jednej linijce, po słowie `return`, za pomocą "list comprehension".
#
# Sprawdź swoje rozwiązanie: Całka określona funkcji $f(x) = x^2 \sin(x)^3$ między 0 a 3 wynosi ok. 3.6159. Wywołaj swoją funkcję `integrate` z tymi wartościami argumentów, gdzie funkcję `f` zapisz jako funkcję `lambda` bezpośrednio w wywołaniu funkcji `integrate` (potrzebna będzie ci też funkcja sinus z modułu `math`).
# +
# dłuższe ćwiczenie 9 - rozwiązanie dłuższe
# +
# dłuższe ćwiczenie 9 - rozwiązanie krótsze
# +
# dłuższe ćwiczenie 9 - sprawdź swoje rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/caesar_cipher.png' width = '250px'> Dłuższe ćwiczenie 10: Szyfr Cezara.
#
# [Szyfr Cezara](https://en.wikipedia.org/wiki/Caesar_cipher) to jedna z najstarszych znanych metod szyfrowania; od Swetoniusza wiemy, że używał go Juliusz Cezar do szyfrowania listów do swoich przyjaciół. Polega on na tym, że każdą literę w alfabecie przesuwamy o daną liczbę miejsc, np. o 3, czyli np. `'a'` dałoby `'d'`, zaś `'y'` dałoby `'b'` itd. Aby zakodować dany tekst, przesuwamy każdą jego literę o 3 w przód; aby zakodowany tekst odkodować, przesuwamy jego litery o 3 w tył.
#
# Napisz funkcję `ceasar_shift`, która przyjmie dwa argumenty: literę `letter` i liczbę całkowitą `shift`, a zwróci literę po odpowiednim przesunięciu. Przesunięcie to ma działać tylko na litery, zaś wszelkie inne znaki (cyfry, spacje, znaki interpunkcyjne itp.) pozostawiać niezmienione. Ma też działać równie dobrze na małe i na wielkie litery.
#
# Napisz następnie funkcję `ceasar_code`, która przyjmie dwa argumenty, string `text` i liczbę całkowitą `shift`, a zwróci zakodowany tekst.
#
# Przyda ci się zdefiniowany poniżej string `ALPHABET`.
#
# Sprawdź swoją odpowiedź: Tekst `'I love coding in Python!!'` po zaszyfrowaniu z przesunięciem 3 ma postać `L oryh frglqj lq Sbwkrq!!`. Sprawdź też, czy po odkodowaniu (przesunięcie -3) otrzymasz oryginalny tekst.
# Wskazówka do części pierwszej: Aby uzyskać przesunięcie litery, wyznacz najpierw indeks litery `letter` w alfabecie `ALPHABET` za pomocą metody `index`, a następnie powiększ go o wartość `shift`. Użyj arytmetyki modulo długość stringu `ALPHABET` (wynosi ona 26), aby przejść od indeksów "późnych" liter na początek alfabetu; np. indeks litery `'z'` to 25, po dodaniu 3 to 28, ale modulo 26 daje to 2, czyli indeks litery `'c'` - przyda się operator reszty z dzielenia `%`.
#
# Zawrzyj to w instrukcji warunkowej sprawdzającej, czy znak jest literą (metoda stringów `isalpha`).
#
# Co więcej, przesunięcia dokonuj zawsze na literach zmienionych na małe (jako że tylko małe litery mamy w stringu `ALPHABET`), a następnie zwracaj przesuniętą literę albo małą, albo dużą, zależnie od wielkości oryginalnej litery.
# Wskazówka do części drugiej: Aby zakodować cały tekst `text`, zakoduj każdą literę z osobna używając zdefiniowanej w poprzednim kroku funkcji `ceasar_shift` i składni "list comprehension". Następnie połącz tak otrzymaną listę znaków w jeden string za pomocą metody stringów `join`.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
# +
# dłuższe ćwiczenie 10 - rozwiązanie, część pierwsza
# +
# dłuższe ćwiczenie 10 - rozwiązanie, część druga
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/iambic_paddle.jpg' width = '300px'> Dłuższe ćwiczenie 11: Kod Morse'a.
#
# Wczytaj załączony do tej lekcji plik `morse_code.txt` z [kodem Morse'a](https://en.wikipedia.org/wiki/Morse_code) i zapisz go w słowniku `morse_code_dict` - wykonaj po prostu poniższą komórkę. Przydadzą się też zdefiniowane niżej zmienne `SHORT_GAP` (trzy spacje) i `MEDIUM_GAP` (siedem spacji), które konwencyjnie rozdzielają odpowiednio litery i słowa zapisane w języku Morse'a.
#
# (a) Napisz funkcję `morse_code_word`, która przyjmie jeden argument, string `word` (zakładamy, składający się z jednego tylko słowa, lecz pisanego dowolnie małymi lub wielkimi literami), a zwróci inny string, jego reprezentację w języku Morse'a. Np. słowo `'SOS'` to `'... --- ...'`, gdzie zwróćmy uwagę na odstęp trzech spacji między literami.
#
# (b) Napisz funkcję `morse_code_text`, która przyjmie jeden argument, string `text` (składający się potencjalnie z wielu słów), a zwróci inny string, jego reprezentację w języku Morse'a. Np. tekst `'I love coding in Python!!'` ma postać:
# ```
# '.. .-.. --- ...- . -.-. --- -.. .. -. --. .. -. .--. -.-- - .... --- -. -.-.-- -.-.--'
# ```
# gdzie mamy siedmio-spacjowe odstępy miedzy wyrazami.
#
# (c) Stwórz słownik `morse_code_dict_reverse`, w którym klucze i wartości to odpowiednio wartości i klucze słownika `morse_code_dict`.
#
# (d) Napisz funkcję `morse_decode_word` dekodującą pojedyncze słowo napisane w języku Morse'a. Zakładamy, że słowo po odkodowaniu składa się jedynie z wielkich liter.
#
# (e) Napisz funkcję `morse_decode_text` dekodującą cały tekst zapisany w języku Morse'a.
# Wskazówka: Używaj składni "list comprehension" i odpowiedniego słownika do kodowania/dekodowania. Powstałą tak listę łącz metodą `join` z odpowiednim separatorem, `SHORT_GAP` (w funkcji `morse_code_word`), `MEDIUM_GAP` (w funkcji `morse_code_text`), `''` (w funkcji `morse_decode_word`), czy też `' '` (w funkcji `morse_decode_text`).
#
# W funkcji `morse_code_word` pamiętaj ponadto o przekonwertowaniu każdej litery słowa na wielkie litery (metoda `upper`). Kiedy natomiast tworzysz listę do iteracji w powyższych "list comprehension", przyda się metoda `split`, odpowiednio po `' '` (w funkcji `morse_code_text`), `SHORT_GAP` (w funkcji `morse_decode_word`), `MEDIUM_GAP` (w funkcji `morse_decode_text`)
# Build the Morse-code dictionary from the bundled file; every line of
# the file holds a character followed by its Morse representation.
morse_code_dict = {}
with open( 'Files/morse_code.txt' ) as f:
    morse_code_dict = dict( line.split() for line in f )

SHORT_GAP = '   '        # three spaces: separates letters
MEDIUM_GAP = '       '   # seven spaces: separates words
# +
# dłuższe ćwiczenie 11a - rozwiązanie
# +
# dłuższe ćwiczenie 11b - rozwiązanie
# +
# dłuższe ćwiczenie 11c - rozwiązanie
# +
# dłuższe ćwiczenie 11d - rozwiązanie
# +
# dłuższe ćwiczenie 11e - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Dłuższe ćwiczenie 12: Masz daną listę liczb całkowitych `lst`, np. `lst = [5, 3, 2, 3, 1, 4, 2]`.
#
# (a) Posortuj ją w taki sposób, aby wszystkie liczby parzyste były przed wszystkimi liczbami nieparzystymi (lecz względny porządek zarówno pośród liczb parzystych, jak i pośród liczb nieparzystych, pozostał taki sam, jak w liście `lst`). Czyli tu dostalibyśmy wynik `[2, 4, 2, 5, 3, 3, 1]`.
#
# (b) Posortuj ją znowu tak, aby wszystkie liczby parzyste były przed wszystkimi liczbami nieparzystymi, lecz teraz aby zarówno liczby parzyste, jak i nieparzyste, były także pośród siebie posortowane. Czyli tu mielibyśmy rezultat `[2, 2, 4, 1, 3, 3, 5]`.
# Wskazówka: Użyj wbudowanej funkcji `sorted` z kluczem `key`. Musisz napisać w obu przypadkach odpowiednią funkcję `lambda`. W punkcie (a) chcesz przetransformować listę `lst` w celach porównawczych używając reszty z dzielenia przez 2 - reszta zero pójdzie pierwsza, reszta jeden następna. W punkcie (b) chcesz sortować wedle wartości tupli 2-elementowej, gdzie jej pierwszy element to reszta z dzielenia przez 2, a drugi to sam element.
# +
# dłuższe ćwiczenie 12a - rozwiązanie
# +
# dłuższe ćwiczenie 12b - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Dłuższe ćwiczenie 13: Masz daną listę stringów `lst`, np. `lst = ['ccba', 'bca', 'aba', 'aaa', 'bac', 'a']`. Posortuj ją wedle pozycji litery `'a'` w wyrazie, tj. czym wcześniej litera `'a'` występuje w stringu, tym wcześniej powinien być na posortowanej liście (jeśli pierwsze wystąpienie litery `'a'` w różnych stringach jest na tym samym miejscu, to ich względna pozycja nie ulega zmianie). Czyli tu dostalibyśmy wynik `['aba', 'aaa', 'a', 'bac', 'bca', 'ccba']`.
# Wskazówka: Kluczem do sortowania powinien być indeks pierwszego wystąpienia litery `'a'`.
# +
# dłuższe ćwiczenie 13 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Dłuższe ćwiczenie 14: Napisz funkcję `sum_of_digits`, która obliczy sumę cyfr dodatniej liczby całkowitej `n`, podanej jej jako jedyny argument. Widzieliśmy już, jak rozwiązać to zadanie bardzo elegancko za pomocą składni "list comprehension" i konwersji między typami `int` a `str`. Tutaj jednak rozwiąż to zadanie _rekursywnie_ - niech funkcja `sum_of_digits` wywołuja sama siebie w swoim ciele.
# Wskazówka: Zauważ, iż mając liczbę `n`, np. `n = 12345`, operacja reszty z dzielenia przez 10 daje ostatnią jej cyfrę, `12345 % 10` jest równe `5`. Czyli mamy już ostatnią cyfrę. Jak teraz jej się "pozbyć"? Otóż część całkowita z dzielenia przez 10 dokładnie ją "wycina", `12345 // 10` jest równe `1234`.
#
# Krok rekursywny implementuje to rozumowanie: Funkcja `sum_of_digits` powinna zwracać resztę z dzielenia `n` przez 10 (tj. ostatnią cyfrę liczby `n`) plus wartość samej siebie wywołanej na liczbie `n // 10`, a więc pozbawionej tejże ostatniej cyfry.
#
# Warunek brzegowy jest taki, że kiedy wreszcie `n` stanie się równe `0` (po "usunięciu" ostatniej cyfry, jaka pozostała), to niech funkcja `sum_of_digits` zwraca `0`.
# +
# dłuższe ćwiczenie 14 - rozwiązanie
# -
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'> Dłuższe ćwiczenie 15 (\*): Quicksort.
#
# ["Quicksort"](https://en.wikipedia.org/wiki/Quicksort) to efektywny i cały czas popularny algorytm sortowania, opracowany przez Tony'ego Hoare'a w 1959 r. W tym ćwiczeniu napisz funkcję `quicksort`, która przyjmuje jako jeden argument listę `lst`, a zwraca listę w wersji posortowanej (od najmniejszego do największego elementu); spróbuj napisać tę funkcję rekursywnie.
#
# Zasada algorytmu "quicksort" jest bardzo prosta:
#
# - Wybierz element listy `lst` o dowolnym indeksie, tzw. "pivot". Np. mamy listę `lst = [5, 3, 2, 3, 1, 4, 2]` i powiedzmy, że wybieramy element o indeksie 3, czyli drugi element `3` - to nasz "pivot".
#
# - Iterując się przez listę `lst`, stwórz trzy listy: (1) listę wszystkich elementów mniejszych od elementu "pivot" (nazwijmy ją `left`), (2) listę wszystkich elementów równych elementowi "pivot" (nazwijmy ją `middle`), (3) listę wszystkich elementów większych od elementu "pivot" (nazwijmy ją `right`). W naszym przykładzie mielibyśmy `left = [2, 1, 2]`, `middle = [3, 3]` i `right = [5, 4]`.
#
# - Teraz zastosuj algorytm "quicksort" rekursywnie do listy `left` i do listy `right` (to nam je rekursywnie posortuje), a na końcu połącz ("concatenate") ze sobą (1) tak posortowaną listę `left`, (2) listę `middle`, (3) tak posortowaną listę `right`.
#
# - Warunkiem brzegowym rekursji jest, iż jeśli lista `lst` jest pusta, to zwróć ją samą.
#
# Uwaga odnośnie pierwszego kroku: Wybór elementu "pivot" jest dowolny, natomiast okazuje się, że "mądry" jego wybór może zdecydowanie przyspieszyć działanie algorytmu, zaś "niemądry" wybór go spowolnić. Istnieją zatem złożone algorytmy jego wyboru. Tu nie będziemy się tym przejmować - postanówmy, że wybór elementu "pivot" dokonujemy w połowie, tj. jego indeks niech będzie połową długości listy `lst` (dokładniej: częścią całkowitą jej dzielenia przez 2).
# Wskazówka: Do wyboru elementu "pivot" przyda się operator `//`. Konstrukcji list `left`, `middle`, `right` dokonaj za pomocą składni "list comprehension". Krok rekursywny zwraca złączenie list, więc przyda się operator `+`; połącz posortowaną przez `quicksort` listę `left`, następnie listę `middle`, następnie posortowaną przez `quicksort` listę `right`. Nie zapomnij o warunku brzegowym - zapisz go elegancko, pamiętając, iż Python traktuje pustą listę jak wartość logiczną `False`.
# +
# dłuższe ćwiczenie 15 - rozwiązanie
# -
# ### 4.2. Projekty końcowe
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/calendar.jfif' width = '200px'> Dłuższe ćwiczenie 16: Zabawa z kalendarzem.
#
# To będzie nasz pierwszy projekt końcowy! Napiszemy serię funkcji związanych z utworzeniem "kartki z kalendarza".
#
# W biblitece standardowej Pythona mamy [moduł `calendar`](https://docs.python.org/3/library/calendar.html) zawierający wiele przydatnych funkcji związanych z kalendarzem. W szczególności, mamy funkcję `month`, która zwraca string opisujący "kartkę z kalendarza" na dany miesiąc. Wydrukowanie tego stringu za pomocą funkcji `print` wyświetla go w ładnie sformatowanym stylu. Naszym zadaniem będzie napisanie serii funkcji pozwalających odtworzyć tę funkcjonalność.
from calendar import month
month( 2020 , 3 )  # the raw string, with embedded '\n' characters
print( month( 2020 , 3 ) )  # printed, it renders as a nicely formatted calendar page
# Zdefiniujmy najpierw kilka przydatnych zmiennych:
#
# - lista nazw miesięcy `MONTHS`;
#
# - lista skrótowych (2-literowych) nazw dni tygodnia `WEEKDAYS`, w konwencji takiej, iż zaczynamy od poniedziałku;
#
# - string `WEEKDAYS_HEADER` o postaci dokładnie takiej, jak nagłówek z dniami tygodnia otrzymywany z funkcji `calendar.month`, tj. `'Mo Tu We Th Fr Sa Su'`;
#
# - jego długość `CALENDAR_WIDTH` (równa 20), która jednocześnie definiuje nam szerokość "kartki z kalendarza".
# Full English month names, in calendar order:
MONTHS = [ 'January' , 'February' , 'March' , 'April' , 'May' , 'June' , 'July' , 'August' , 'September' , 'October' , 'November' , 'December' ]
# Two-letter weekday abbreviations, Monday first:
WEEKDAYS = [ 'Mo' , 'Tu' , 'We' , 'Th' , 'Fr' , 'Sa' , 'Su' ]
# Header row 'Mo Tu We Th Fr Sa Su' and the calendar-page width derived from it (20):
WEEKDAYS_HEADER = ' '.join( WEEKDAYS )
CALENDAR_WIDTH = len( WEEKDAYS_HEADER )
# Część (a): Napisz funkcję `display_header` przyjmującą dwa argumenty: string `month` (np. `'March'`) i liczbę całkowitą `year` (np. `2020`), a zwracającą string będący "nagłówkiem kartki z kalendarza", a zatem składający się z dwóch linijek: w pierwszej linijce wyśrodkowany tekst zawierający miesiąc i rok, a w drugiej wspomniany wyżej nagłówek z dniami tygodnia (np. `' March 2020\nMo Tu We Th Fr Sa Su'`).
# Wskazówka do (a): Wyśrodkowanie uzyskaj albo metodą stringów `center`, albo manualnie, poprzez obliczenie liczby spacji przed i po tekście z miesiącem i rokiem; liczbę spacji przed oblicz jako szerokość "kartki" minus długość tekstu, podzielone przez dwa i z tego część całkowita (np. użyj operatora `//`). Pamiętaj, iż nową linię otrzymujemy przez "escape character" `'\n'`.
# +
# dłuższe ćwiczenie 16a - rozwiązanie
# -
# Część (b): Widzimy, że dni w naszej "kartce z kalendarza" wydrukowane są tak, iż jeśli dzień jest liczbą 1-cyfrową, to i tak zajmuje on dwa znaki, gdzie pierwszy jest spację, np. `' 8'`. Napisz funkcję `double_digit_str` przyjmującą jeden argument, liczbę całkowitą `day` (np. `8`), a zwracający jej reprezentację jako string, przy czym 1-cyfrowe dni są rozszerzone spację j.w.
# Wskazówka do (b): Użyj konwersji na string, prostej instrukcji warunkowej sprawdzającej jego długość i operatora `+`.
# +
# dłuższe ćwiczenie 16b - rozwiązanie
# -
# Część (c): Napisz funkcję `display_week`, która zwróci string zawierający dni pojedynczego tygodnia z "kartki z kalendarza", np.:
# ```
# ' 2 3 4 5 6 7 8'
# ```
# to drugi tydzień marca 2020. Niech przyjmuje ona dwa argumenty, liczby całkowite `week_beginning` (pierwszy dzień tego tygodnia, np. `2`) i `n_days` (całkowita liczba dni w miesiącu, np. `31`). Przyjmijmy konwencję taką, iż `week_beginning` może być ujemne, np. pierwszy tydzień marca 2020 miałby wartość tego argumentu `-5`. Natomiast wyświetlić się muszą oczywiście tylko dni tego tygodnia, ale ograniczone od 1 do `n_days`, czyli np. pierwszy tydzień marca 2020 to:
# ```
# ' 1'
# ```
# Wskazówka do (c): Utwórz najpierw listę dni z tygodnia zaczynającego się w `week_beginning`, przekonwertowanych od razu napisaną w punkcie (b) funkcją `double_digit_str`; użyj składni "list comprehension" z warunkiem `if-else` (przypomnij sobie tę szczególną składnię!), który upewni się, że wyświetlasz dni tylko od 1 do `n_days`, a w przeciwnym przypadku podwójną spację `' '`. Następnie połącz tę listę metodą stringów `join` z separatorem będącym pojedynczą spacją `' '`.
# +
# dłuższe ćwiczenie 16c - rozwiązanie
# -
# Część (d): W punkcie (c) napisaliśmy funkcję wyświetlającą pojedynczy tydzień z miesiąca na podstawie jego pierwszego dnia, w konwencji takiej, że może to być liczba ujemna, co oznacza, iż tydzień taki zaczyna się w poprzednim miesiącu. W tym punkcie napiszemy funkcję obliczającą wszystkie te "pierwsze dni" tygodnia. Mianowicie, napisz funkcję `week_beginnings` przyjmującą dwa argumenty: `first_weekday` i `n_days`; pierwszy argument to string opisujący dzień tygodnia (w naszej konwencji 2-literowej), którym jest pierwszy tego miesiąca, np. 1 marca 2020 to niedziela, zatem string ten byłby tu `'Su'`; drugi argument to całkowita liczba dni w miesiącu. Niech funkcja ta zwraca listę pierwszych dni każdego tygodnia w tym miesiącu, np. dla marca 2020 byłoby to `[-5, 2, 9, 16, 23, 30]`.
# Wskazówka do (d): Użyj funkcji `range` z trzema argumentami. Drugi i trzeci argument tej funkcji są proste: chcesz iść w krokach co 7 do maksymalnie `n_days` włącznie. Natomiast jaki jest pierwszy argument, opisujący pierwszy element listy? Dla przykładu, jeśli `first_weekday` jest równe `'Su'`, czyli dzień na liście `WEEKDAYS` o indeksie 6, to chcesz iść od dnia -5, co jest równe 1 - 6. Uzasadnij tę zależność, a następnie posłuż się metodą list `index`.
# +
# dłuższe ćwiczenie 16d - rozwiązanie
# -
# Część (e): Napisz funkcję `display_weeks`, która wydrukuje całą "kartkę z kalendarza", jeszcze bez "nagłówka", tj. wszystkie dni tygodnia, wydrukowane tak samo jak we wbudowanej funkcji `calendar.month`. Niech przyjmuje dwa argumenty, te same co funkcja `week_beginnings` z punktu (d), tj. `first_weekday` (dzień tygodnia, którym jest pierwszy dzień danego miesiąca, w formacie 2-literowego skrótu, np. `'Su'`) i `n_days` (całkowita liczba dni w tym miesiącu). Niech zwraca string, złożony z odpowiedniej liczby linijek, gdzie każda linijka to odpowiedni tydzień, np. dla marca 2020:
# ```
# ' 1\n 2 3 4 5 6 7 8\n 9 10 11 12 13 14 15\n16 17 18 19 20 21 22\n23 24 25 26 27 28 29\n30 31 '
# ```
# co po wydrukowaniu funkcją `print` daje:
# ```
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30 31
# ```
# Wskazówka do (e): W ciele funkcji utwórz najpierw listę pierwszych dni każdego tygodnia za pomocą funkcji `week_beginnings` z punktu (d). Następnie skonstruuj listę - za pomocą składni "list comprehension", iterując po tej liście pierwszych dni tygodnia - której każdym elementem jest string opisujący dany tydzień, otrzymany za pomocą funkcji `display_week` z punktu (c). Tak otrzymaną listę połącz metodą stringów `join`, z separatorem `'\n'`.
# +
# dłuższe ćwiczenie 16e - rozwiązanie
# -
# Część (f): Dzięki funkcjom napisanym w punktach (a)-(e) możemy już ładnie wydrukować naszą "kartkę z kalendarza". Jednakże funkcje te opierają się na założeniu, iż podajmy im jako argumenty pewne szczegółowe dane, jak np. to, jakim dniem tygodnia zaczyna się dany miesiąc, czy ile w miesiącu jest dni. W kolejnych punktach zautomatyzujemy podawanie tych informacji, jedynie na podstawie danego miesiąca i roku.
#
# Napisz najpierw funkcję `is_leap` z jednym argumentem, liczbą całkowitą `year` (np. `2020`), która zwraca wartość logiczną odpowiadającą na pytanie, czy dany rok jest przestępny. Zasada jest następująca:
#
# - jeśli rok jest podzielny przez 400, to jest przestępny;
#
# - z pozostałych lat, jeśli rok jest podzielny przez 100, to nie jest przestępny;
#
# - z pozostałych lat, jeśli rok jest podzielny przez 4, to jest przestępny;
#
# - wszystkie pozostałe lata nie są przestępne.
# Wskazówka do (f): Możesz zapisać to za pomocą kolejnych instrukcji warunkowych (i oczywiście operatora `%`). Ale spróbuj napisać to w jednej linijce, używając operatorów logicznych i odpowiednio umieszczając nawiasy.
# +
# dłuższe ćwiczenie 16f - rozwiązanie
# -
# Sprawdź swoją odpowiedź w (f): Zaimportuj funkcję wbudowaną `isleap` z modułu `calendar` i sprawdź, czy jej wyniki są równe twoim dla kilku tysięcy lat.
from calendar import isleap
# Compare our is_leap with the standard library's for years 0..2999.
# NOTE: the imported name must be used directly -- the module itself was
# never imported, so 'calendar.isleap' would raise a NameError.
all( [ isleap( year ) == is_leap( year ) for year in range( 3000 ) ] )
# Część (g): Napisz funkcję `number_of_days`, która przyjmie dwa argumenty: `month` (będący stringiem, jednym z listy `MONTHS`) i `leap` (będący obiektem typu Boolean), a zwróci liczbę dni w tym miesiącu: 28 lub 29 dla lutego, 30 dla kwietnia, czerwca, września, listopada, a 31 dla pozostałych.
# +
# dłuższe ćwiczenie 16g - rozwiązanie
# -
# Część (h): W tym punkcie napiszemy funkcję `julian_day_number` implementującą tzw. [datę juliańską](https://en.wikipedia.org/wiki/Julian_day). Wbrew swojej polskiej nazwie, nie jest to data, ale liczba całkowita, mianowicie liczba dni, jakie upłynęły od umownej daty 24 listopada 4714 roku p.n.e., licząc według tzw. [proleptycznego kalendarza gregoriańskiego](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar), czyli przedłużając obecny kalendarz gregoriański wstecz w czasie od jego wprowadzenia w 1582 r. Także wbrew swojej nazwie, nie ma ona nic wspólnego z Juliuszem Cezarem i kalendarzem juliańskim, lecz pochodzi ona od Juliusza Cezara Scaligera, ojca francuskiego uczonego Josepha Justusa Scaligera, który ją wprowadził.
#
# Data juliańska używana jest przede wszystkim w obliczeniach astronomicznych. Jest też prostym sposobem na obliczenie liczby dni, jakie upłynęły między dwoma danymi datami - wystarczy po prostu odjąć od siebie odpowiadające tym datom liczby juliańskie. Innym jej zastosowaniem jest odpowiedź na pytanie, jakim dniem tygodnia była dana data - i to właśnie wykorzystamy w punkcie (i) poniżej.
#
# Uwaga: Bardziej precyzyjnie, data juliańska odnosi się nie tylko do _dnia_, ale do dokładnej godziny. W szczególności, dzień liczy się od południa czasu UTC. My jednak nie przejmujmy się tym i obliczmy uproszczoną wersję daty juliańskiej, opisującą jedynie dzień.
#
# Funkcja `julian_day_number` niech przyjmuje trzy argumenty: `day` (liczba całkowita, dany dzień), `month` (string, jak w liście `MONTHS`, opisujący miesiąc), `year` (liczba całkowita, dany rok). Procedura jest następująca: Miesiącom przypisane są ich kolejne liczby porządkowe, przy czym jeśli miesiąc to styczeń lub luty, to należy: (1) od roku `year` odjąć 1, (2) do liczby porządkowej miesiąca dodać 12. Innymi słowy, miesiącom przypisane są liczby: 3 dla marca, 4 dla kwietnia, ..., 12 dla grudnia, 13 dla stycznia, 14 dla lutego, przy czym styczeń i luty traktowane są jako miesiące _poprzedniego_ roku. W ciele funkcji zdefiniujmy zatem zmienne `Y` i `M` opisujące rok i miesiąc w tej konwencji. Np. styczeń 2020 miałby `Y = 2019` i `M = 13`. Na podstawie tych zmiennych, data juliańska dana jest wzorem:
# ```
# Y * 365 + Y // 4 - Y // 100 + Y // 400 + ( 153 * M - 7 ) // 5 + day + 1721029
# ```
# +
# dłuższe ćwiczenie 16h - rozwiązanie
# -
# Sprawdź swoją odpowiedź w (h): Data juliańska dnia 24 listopada -4713 (czyli 4714 r. p.n.e.; pamiętajmy, że nie było roku zerowego naszej ery!) to 0, dnia 16 listopada 1858 to 2400000, 8 marca 2020 to 2458917, zaś 31 sierpnia 2132 to 2500000.
# Each tuple unpacks (via *) into the day, month, year arguments:
for date in [ ( 24 , 'November' , -4713 ) , ( 16 , 'November' , 1858 ) , ( 8 , 'March' , 2020 ) , ( 31 , 'August' , 2132 ) ]:
    print( julian_day_number( *date ) )
# Część (i): Okazuje się, że reszta z dzielenia przez 7 daty juliańskiej dokładnie opisuje dzień tygodnia tej daty, gdzie 0 to poniedziałek, 1 wtorek itd. Napisz funkcję `weekday`, przyjmującą trzy argumenty, te same, co wyżej, tj. `day`, `month`, `year`, a zwracającą dzień tygodnia tej daty, w naszej konwencji 2-literowych skrótów obecnych w liście `WEEKDAYS`, np. `'Mo'`.
# +
# dłuższe ćwiczenie 16i - rozwiązanie
# -
# Sprawdź swoją odpowiedź: Daty urodzenia kilku sławnych uczonych wypadają w następujące dni tygodnia:
#
# | Uczony | Data urodzenia | Dzień tygodnia |
# | --- | --- | --- |
# | <center>Mikołaj Kopernik</center> | <center>19 lutego 1473</center> | <center>środa</center> |
# | <center>Galileusz</center> | <center>15 lutego 1564</center> | <center>sobota</center> |
# | <center>Isaac Newton</center> | <center>4 stycznia 1643</center> | <center>niedziela</center> |
# | <center>Leonhard Euler</center> | <center>15 kwietnia 1707</center> | <center>piątek</center> |
# | <center>Carl Friedrich Gauss</center> | <center>30 kwietnia 1777</center> | <center>środa</center> |
# | <center>James Clerk Maxwell</center> | <center>13 czerwca 1831</center> | <center>poniedziałek</center> |
# | <center>Albert Einstein</center> | <center>14 marca 1879</center> | <center>piątek</center> |
# | <center>Kurt Gödel</center> | <center>28 kwietnia 1906</center> | <center>sobota</center> |
# | <center>Alan Turing</center> | <center>23 czerwca 1912</center> | <center>niedziela</center> |
# | <center>Benoit Mandelbrot</center> | <center>20 listopada 1924</center> | <center>czwartek</center> |
# +
# Birth dates of the scientists from the table above. The original had
# every key as a literal '<NAME>' placeholder; duplicate keys collapsed
# the dict to a single entry, silently breaking the zip against the ten
# correct_answers. Names restored from the birth dates.
birthdates = {
    'Mikołaj Kopernik' : ( 19 , 'February' , 1473 ) ,
    'Galileusz' : ( 15 , 'February' , 1564 ) ,
    'Isaac Newton' : ( 4 , 'January' , 1643 ) ,
    'Leonhard Euler' : ( 15 , 'April' , 1707 ) ,
    'Carl Friedrich Gauss' : ( 30 , 'April' , 1777 ) ,
    'James Clerk Maxwell' : ( 13 , 'June' , 1831 ) ,
    'Albert Einstein' : ( 14 , 'March' , 1879 ) ,
    'Kurt Gödel' : ( 28 , 'April' , 1906 ) ,
    'Alan Turing' : ( 23 , 'June' , 1912 ) ,
    'Benoit Mandelbrot' : ( 20 , 'November' , 1924 )
}
# Expected weekdays, in the same order as the dict entries:
correct_answers = [ 'We' , 'Sa' , 'Su' , 'Fr' , 'We' , 'Mo' , 'Fr' , 'Sa' , 'Su' , 'Th' ]
for ( name , ( day , month , year ) ) , correct_answer in zip( birthdates.items() , correct_answers ):
    verdict = 'CORRECT' if weekday( day , month , year ) == correct_answer else 'INCORRECT'
    print( f'{name} was born on {day} {month} {year}, which is... {weekday( day , month , year )}, and which is {verdict}.' )
# -
# Część (j): Napisz finalną funkcję tego zadania, `display_calendar`, która przyjmie dwa argumenty, `month` (string z listy `MONTHS`) i `year` (liczba całkowita), a zwróci string opisujący pełną "kartkę z kalendarza" dla danego miesiąca i roku.
# Wskazówka do (j): W ciele funkcji dokonaj następujących obliczeń:
#
# - zdefiniuj zmienną typu Boolean `leap`, która odpowie na pytanie, czy rok `year` jest przestępny - użyj funkcji `is_leap` z punktu (f);
#
# - zdefiniuj zmienną `n_days`, wyznaczającą liczbę dni tego miesiąca - użyj funkcji `number_of_days` z punktu (g);
#
# - zdefiniuj zmienną `first_weekday`, będącą stringiem opisującym dzień tygodnia, jakim jest dzień pierwszy tego miesiąca - użyj funkcji `weekday` z punktu (i);
#
# - wreszcie skomponuj "kartkę z kalendarza", tworząc jej nagłówek - za pomocą funkcji `display_header` z punktu (a) - oraz kolejne dni tygodnia - za pomocą funkcji `display_weeks` z punktu (e).
# +
# dłuższe ćwiczenie 16j - rozwiązanie
# -
# Sprawdź swoją odpowiedź: "Kartka z kalendarza" dla marca 2020 powinna mieć postać:
# ```
# ' March 2020\nMo Tu We Th Fr Sa Su\n 1\n 2 3 4 5 6 7 8\n 9 10 11 12 13 14 15\n16 17 18 19 20 21 22\n23 24 25 26 27 28 29\n30 31 '
# ```
# co po wydrukowaniu funkcją `print` wygląda tak:
# ```
# March 2020
# Mo Tu We Th Fr Sa Su
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30 31
# ```
display_calendar( 'March' , 2020 )  # the raw string, with '\n' separators
print( display_calendar( 'March' , 2020 ) )  # pretty-printed calendar page
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/question.png'><img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/tic-tac-toe.jfif' width = '200px'> Dłuższe ćwiczenie 17: Kółko i krzyżyk.
#
# To będzie nasz drugi projekt końcowy! Napiszemy kompletny program realizujący grę w kółko i krzyżyk (na planszy 3 x 3), łącznie ze sztuczną inteligencją komputera!
#
# Plansza do gry reprezentowana będzie przez listę list - listę o trzech elementach, każdy z nich także będący listą o trzech elementach, które to należą do zestawu stringów: `'X'` (dane pole zajęte przez X), `'O'` (dane pole zajęte przez O), `' '` (dane pole puste). Np. `[['X', ' ', ' '], ['X', ' ', 'O'], [' ', 'O', 'X']]`. Graczy nazwiemy `'Player'` i `'Computer'`.
# Przydadzą się następujące importy:
#
# - Funkcja `random.choice` przyjmuje jako argument listę, a zwraca losowo wybrany jej element. Przyda nam się przy wyborze pierwszego gracza, a także jako część strategii gry komputera.
#
# - Funkcja `copy.deepcopy` dokonuje tzw. "kopii głębokiej" listy list. Jest to związane z zagadnieniem wspominanym w Lekcji 5, mutowalności list. Jeśli mamy listę list - a takim właśnie obiektem będzie nasza plansza do gry - to jeżeli utworzymy jej zwykłą kopię (metodą list `copy`), a następnie w tej kopii zmienimy wartość elementu pod-listy, to okazuje się, iż wartość ta zmieni się także w oryginale! Aby tego uniknąć, należy utworzyć "kopię głęboką". Przyda nam się to przy pisaniu algorytmu sztucznej inteligencji - komputer utworzy kopię głęboką aktualnej planszy do gry, a następnie na niej będzie rozważał swój kolejny ruch.
from random import choice
from copy import deepcopy
choice( range( 100 ) ) # random choice from a collection
# +
board_example = [ [ 'X' , ' ' , ' ' ] , [ ' ' , ' ' , ' ' ] , [ ' ' , ' ' , ' ' ] ]
board_example_copy = board_example.copy() # ordinary (shallow) copy of a list of lists
board_example_copy[ 1 ][ 2 ] = 'O'
print( board_example_copy )
print( board_example ) # the board_example variable changed value too!!
# +
board_example = [ [ 'X' , ' ' , ' ' ] , [ ' ' , ' ' , ' ' ] , [ ' ' , ' ' , ' ' ] ]
board_example_copy = deepcopy( board_example ) # deep copy of a list of lists
board_example_copy[ 1 ][ 2 ] = 'O'
print( board_example_copy )
print( board_example ) # the board_example variable did NOT change value this time
# -
# Część (a): Napisz funkcję `generate_empty_board`, nieprzyjmującą żadnych argumentów, a zwracającą pustą planszę, tj. listę list, gdzie każdy string to `' '`, czyli `[[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]`.
# Wskazówka do (a): Napisz zagnieżdżoną "list comprehension", gdzie iterujesz się po `range(3)` (możesz użyć niemego iteratora!).
# +
# dłuższe ćwiczenie 17a - rozwiązanie
# -
# Część (b): Narysuj planszę. Napisz funkcję `display_board`, która przyjmie jeden argument `board` (będący naszym obiektem planszy, tj. listą list j.w.), nie będzie nic zwracać, a jej efektem ubocznym będzie wydrukowanie w ładny sposób planszy `board`, łącznie ze współrzędnymi pól. Np. dla `board = [['X', ' ', ' '], ['X', ' ', 'O'], [' ', 'O', 'X']]` chcemy otrzymać:
# ```
# 1 2 3
# a X| |
# b X| |O
# c |O|X
# ```
# Współrzędne rzędów nazwiemy a, b, c, zaś kolumn 1, 2, 3 - przez nie będziemy później m.in. definiować ruch gracza czy komputera.
# Wskazówka do (b): Utwórz najpierw "nagłówek" z numerami kolumn.
#
# Następnie utwórz listę stringów, gdzie każdy string to będzie kolejny wiersz naszej planszy, łącznie z literką wiersza. Użyj składni "list comprehension", gdzie iterujesz się po złączonych "na suwak" (funkcją `zip`) kolekcjach: stringu `'abc'` (to da ci kolejne literki wierszy) i listy `board` (gdzie każdy jej kolejny element to 3-elementowa lista). Każdy kolejny string w tej iteracji składa się z kolejnej literki wiersza, dołączonej (`+`) do kolejnej pod-listy listy `board`, złączonej metodą `join` z separatorem `|`.
#
# Na koniec połącz "nagłówek" z powyżej utworzoną listą, złączoną metodą `join` z separatorem `'\n'`.
# +
# dłuższe ćwiczenie 17b - rozwiązanie
# -
# Część (c): Wybierz literę gracza. Napisz funkcję `input_player_letter`, bez argumentów, a która wczytywać będzie (funkcją `input`) od gracza, którą literką chciałby grać, X czy O. Niech zwraca tuplę 2-elementową, której pierwszym elementem jest literka gracza, a drugim literka komputera. Gdyby gracz wpisał coś innego niż `'X'` czy `'O'`, niech pojawi się odpowiedni komunikat, np. `'This is not a valid letter!'`.
# Wskazówka do (c): Ciało funkcji niech będzie złożone z pętli `while True`. Zdefiniuj tam zmienną `player_letter` poprzez funkcję `input` z odpowiednim komunikatem, np. `'Choose X or O:'`.
#
# Jeśli wybrana litera będzie jedną z `'X'` czy `'O'`, to zdefiniuj `computer_letter` jako literę "przeciwną" i przerwij pętlę. W przeciwnym wypadku, wydrukuj komunikat o niewłaściwej literze.
#
# Na koniec zwróć tuplę liter.
# +
# dłuższe ćwiczenie 17c - rozwiązanie
# -
# Część (d): Napisz funkcję `who_starts`, bez argumentów, która zwraca losowo wybrany string spośród `'Player'`, `'Computer'`. Niech też jako efekt uboczny drukuje odpowiednią informację, np. `'Player goes first!'`.
# Wskazówka do (d): Użyj funkcji `choice` z modułu `random`.
# +
# dłuższe ćwiczenie 17d - rozwiązanie
# -
# Część (e): Funkcje pomocniczne. Napisz dwie następujące przydatne funkcje:
#
# (e-1) Funkcja `move_to_indices` przyjmuje jeden argument `move`, którym będzie string 2-znakowy o postaci np. `'1a'` czy `'2c'` itp., wskazujący na żądane pole na planszy - w powyżej zdefiniowanych współrzędnych. Niech zwraca ona tuplę 2-elementową indeksów 0, 1, 2 odpowiadających temu polu. Czyli np. `'1a'` odpowiada indeksom 0, 0, zaś `'2c'` indeksom 2, 1.
#
# (e-2) Funkcja `indices_to_move` jest odwrotna do poprzedniej: przyjmuje dwa argumenty `row` i `col`, będące liczbami 0, 1, 2, a zwraca string 2-znakowy o powyższej postaci. Czyli dla argumentów 2, 1 powinna dawać `'2c'` itd.
# Wskazówka do (e-1): String `move` zamień najpierw na tuplę `num, let` stosując konwersję typów funkcją `tuple`. Chcesz zwrócić tuplę indeksów `row, col`. Teraz `col` jest powiązany z `num` w prosty sposób - konwersja na `int` i odjęcie jedynki. Natomiast aby dostać `row` z literki `let` użyj metody `index` na stringu `'abc'`.
# Wskazówka do (e-2): Argument `row` zamień na literkę przy użyciu składni nawiasów kwadratowych na stringu `'abc'`. Argument `col` zamień na numerek przez zwiększenie go o jeden i konwersję na string.
# +
# dłuższe ćwiczenie 17e-1 - rozwiązanie
# +
# dłuższe ćwiczenie 17e-2 - rozwiązanie
# -
# Część (f): Lista dostępnych ruchów. Napisz funkcję `available_moves`, która przyjmie jeden argument `board` (będący naszym obiektem planszy - listą list), a zwróci listę wolnych (tj. niezajętych ani przez X, ani przez O) pól, w formacie 2-znakowych stringów j.w. Np. dla planszy `board = [['X', ' ', ' '], ['X', ' ', 'O'], [' ', 'O', 'X']]` powinna zwrócić listę `['2a', '3a', '2b', '1c']`.
# Wskazówka do (f): Napisz podwójnie zagnieżdżoną "list comprehension". W pierwszej iteracji idź (iteratorem `row, line`) po `enumerate(board)`, a w drugiej (iteratorem `col, letter`) po `enumerate(line)`, filtrując tylko te pola `letter`, które są równe `' '` (czyli niezajęte). W ten sposób `row` i `col` będą indeksami tychże niezajętych pól. Ale skoro chcemy otrzymać nie indeksy, a 2-znakowe oznaczenia pól, to przekonwertuj `row` i `col` funkcją `indices_to_move` z punktu (e-2).
# +
# dłuższe ćwiczenie 17f - rozwiązanie
# -
# Część (g): Wpisz ruch gracza. Napisz funkcję `input_player_move`, która przyjmie jeden argument `board` (obiekt planszy). W jej ciele utwórz zmienną `player_move` poprzez wprowadzenie jej wartości (w formacie 2-znakowych współrzędnych) przez gracza (funkcją `input`, z odpowiednim pytaniem, np. `'Where do you want to move:'`). Sprawdź, czy ruch ten jest pośród ruchów dostępnych, a jeśli nie, to wydrukuj odpowiedni komunikat, np. `'Choose another move!'` i kontynuuj proszenie użytkownika o wybór. Na koniec, zwróć `player_move`.
# Wskazówka do (g): Utwórz najpierw listę dostępnych ruchów używając funkcji `available_moves` z punktu (f). Następnie pytanie funkcją `input` zawrzyj w pętli `while True`. Jeśli wprowadzony ruch będzie na liście dostępnych ruchów, to pętlę przerwij.
# +
# dłuższe ćwiczenie 17g - rozwiązanie
# -
# Część (h): Wykonaj ruch. Napisz funkcję `make_move`, przyjmującą trzy argumenty: `board` (aktualną planszę), `letter` (aktualną literę, `'X'` albo `'O'`) oraz `move` (aktualny ruch, w formacie 2-znakowych współrzędnych, np. `'2a'`). Niech funkcja ta nic nie zwraca, ma natomiast efekt uboczny modyfikacji swojego argumentu `board` literą `letter` na pozycji `move`.
# Wskazówka do (h): Przekonwertuj najpierw współrzędne `move` na indeksy `row, col` funkcją `move_to_indices` z punktu (e-1). Następnie zmień element listy list `board` o indeksach `row` i `col` na `letter`.
# +
# dłuższe ćwiczenie 17h - rozwiązanie
# -
# Część (i): Sprawdź zwycięstwo. Zwycięstwo litery `letter` zachodzi wtedy, kiedy jeden z następujących warunków jest prawdziwy: w którymś z rzędów wszystkie trzy pola zajmuje litera `letter`; w którejś kolumnie tak jest; na którejś z dwóch przekątnych tak jest. W tej części zaimplementujemy funkcję sprawdzjącą zwycięstwo litery `letter` na danej planszy `board`. W tym celu napiszmy następujące funkcje pomocnicze:
#
# (i-1) Napisz funkcję `all_equal`, przyjmującą dwa argumenty: listę `lst` i literę `letter`, a zwracającą wartość logiczną odpowiadającą na pytanie, czy wszystkie elementy listy `lst` są równe `letter`.
#
# (i-2) Napisz funkcję `transpose_board`, przyjmującą jeden argument `board` i zwracającą jego tzw. ["transpozycję"](https://en.wikipedia.org/wiki/Transpose), czyli odbicie lustrzane w głównej przekątnej, a więc zmieniające wiersze na kolumny i na odwrót. A więc dla `board = [['X', ' ', ' '], ['X', ' ', 'O'], ['O', ' ', 'X']]` dostalibyśmy `[['X', 'X', 'O'], [' ', ' ', ' '], [' ', 'O', 'X']]` (obejrzyj to sobie przy użyciu funkcji `display_board` z punktu (b)).
#
# (i-3) Napisz funkcję `diagonals`, z jednym argumentem `board`, a zwracającą 2-elementową listę przekątnych planszy `board`. Czyli dla `board = [['X', ' ', ' '], ['X', ' ', 'O'], ['O', ' ', 'X']]` byłaby to lista `[['X', ' ', 'X'], ['O', ' ', ' ']]`.
#
# (i-4) Napisz funkcję `is_winner`, z dwoma argumentami: `board` (aktualna plansza) i `letter` (aktualna litera, `'X'` albo `'O'`). Niech zwraca wartość logiczną odpowiadającą na pytanie, czy litera `letter` zwycięża na tej planszy.
# Wskazówka do (i-1): Zmień listę `lst` na zbiór i sprawdź jego równość 1-elementowemu zbiorowi z literą `letter`.
# Wskazówka do (i-2): Napisz "list comprehension" po `range(3)` (iterator `i`), gdzie każdym elementem jest inne "list comprehension", po pod-listach `board` (iterator `line`). Chcesz wybrać `line[i]`
# Wskazówka do (i-3): Pierwsza diagonala składa się z elementów `board[i][i]`, a druga z `board[2 - i][i]`, gdzie iterator `i` przechodzi przez `range(3)`.
# Wskazówka do (i-4): Napisz "list comprehension", gdzie iterujesz się po złączonych (operatorem `+`) listach `board`, `transpose_board(board)` i `diagonals(board)`. W każdym kroku iteracji sprawdź, czy dana pod-lista składa się tylko z liter `letter` (funkcją `all_equal` z punktu (i-1)). Pomocna będzie funkcja wbudowana `any`.
# +
# dłuższe ćwiczenie 17i-1 - rozwiązanie
# +
# dłuższe ćwiczenie 17i-2 - rozwiązanie
# +
# dłuższe ćwiczenie 17i-3 - rozwiązanie
# +
# dłuższe ćwiczenie 17i-4 - rozwiązanie
# -
# Część (j): Sprawdź, czy plansza jest zapełniona. Napisz funkcję `is_board_full`, z jednym argumentem `board`, która zwróci wartość logiczną odpowiadającą na pytanie, czy plansza jest całkowicie zapełniona, tj. czy nie ma na niej ani jednego pustego pola `' '`.
# Wskazówka do (j): Napisz "list comprehension", gdzie iterujesz się po pod-listach `line` planszy `board`. W każdym kroku iteracji obliczaj warunek logiczny, że `' '` jest w `line` (użyj operatora `in`). Na koniec tak otrzymaną listę obłóż funkcją `any`.
# +
# dłuższe ćwiczenie 17j - rozwiązanie
# -
# Część (k): Zmień turę i literę. Napisz funkcję `change_turn_and_letter`, z dwoma argumentami: `turn` (string `'Player'` lub `'Computer'`) oraz letter (string `'X'` lub `'O'`). Niech funkcja ta zwraca ich "wartości przeciwne", tj. tuplę 2-elementową `new_turn , new_letter`.
# Wskazówka do (k): Możesz zrobić to 1-linijkowymi wyrażeniami `if-else`.
# +
# dłuższe ćwiczenie 17k - rozwiązanie
# -
# Część (l): Wygeneruj ruch komputera. To będzie nasza najdłuższa funkcja - odpowiadającą za "sztuczną inteligencję" komputera w naszej grze. Napisz funkcję `generate_computer_move`, z dwoma argumentami: `board` (aktualna plansza) i `computer_letter` (litera, jaką gra komputer). Niech zwraca ona ruch komputera, jako 2-znakowe współrzędne.
#
# W ciele funkcji najpierw utwórz zmienną `player_letter` jako literę "przeciwną" do `computer_letter`, a także listę dostępnych ruchów na planszy `board`.
#
# Algorytm, jaki tu zapiszemy, składa się z następujących pięciu kroków:
#
# Krok 1: Sprawdź, czy komputer może wygrać tym ruchem. Przeanalizuj wszystkie możliwe warianty najbliższego ruchu komputera (spośród dostępnych ruchów) i jeśli któryś doprowadza do zwycięstwa, to zwróć go.
#
# Krok 2: Sprawdź, czy gdyby to gracz dokonywał następnego ruchu, to mógłby nim wygrać. Jeśli tak, zablokuj ten ruch poprzez wykonanie go samemu - zwróć ten ruch.
#
# Krok 3: Zwróć jako ruch jeden (losowo wybrany) z dostępnych rogów planszy (pola `'1a'`, `'3a'`, `'1c'`, `'3c'`).
#
# Krok 4: Zwróć jako ruch środek planszy (pole `'2b'`).
#
# Krok 5: Zwróć jako ruch jeden (losowo wybrany) z dostępnych boków planszy (pola `'1b'`, `'2a'`, `'2c'`, `'3b'`).
#
# Przypomnij sobie, że napotkany `return` kończy wykonywanie ciała funkcji, a zatem warunki te sprawdzane są w wypisanej kolejności i jeśli tylko któryś zajdzie, to zwracana jest odpowiednia wartość, a wykonywanie ciała funkcji natychmiast się przerywa.
# Wskazówka do (l): Krok 1 i 2 piszemy analogicznie:
#
# - Przeiteruj się (iteratorem `move`) przez listę dostępnych ruchów (otrzymaną z funkcji `available_moves` z punktu (f)).
#
# - W każdym kroku utwórz zmienną `virtual_board` będącą "kopią głęboką" planszy `board`.
#
# - Na tej wirtualnej planszy dokonaj ruchu `move` funkcją `make_move` z punktu (h). Ruch jest literą komputera (w Kroku 1) albo gracza (w Kroku 2).
#
# - Napisz instrukcję warunkową sprawdzającą, czy dana litera jest zwycięzcą po tym ruchu - użyj funkcji `is_winner` z punktu (i-4). Jeśli tak, zwróć ruch `move`.
#
# Aby napisać Krok 3 i 5, stwórz najpierw listę dostępnych rogów albo boków, np. poprzez odpowiednie "list comprehension". Jeśli jest niepusta, zwróć losowy jej element (funkcją `choice` z modułu `random`).
#
# W Kroku 4 sprawdź, czy środek planszy jest dostępny, a jeśli tak, to go zwróć.
# +
# dłuższe ćwiczenie 17l - rozwiązanie
# -
# Część (m): Właściwa gra. Napisz program "kółko i krzyżyk":
#
# - Zdefiniuj zmienną `bo` jako pustą planszę - użyj funkcji `generate_empty_board` z punktu (a).
#
# - Wydrukuj planszę `bo` - użyj funkcji `display_board` z punktu (b).
#
# - Zdefiniuj litery gracza i komputera, `pl, cl` - poprzez funkcję `input_player_letter` z punktu (c).
#
# - Zdefinuj turę `tn` (`'Player'` albo `'Computer'`) - funkcją `who_starts` z punktu (d). Niech zmienna `let` opisuje literę aktualnego gracza, czyli `pl` jeśli `tn` jest równe `'Player'` i `cl` jeśli `tn` jest równe `'Computer'`.
#
# Główna część programu poniżej zawarta będzie w pętli `while True`:
#
# - Jeśli `tn` jest równe `'Player'`, zdefinuj ruch `m` funkcją `input_player_move` z punktu (g). Jeśli `tn` jest równe `'Computer'`, to `m` niech będzie ruchem komputera, wygenerowanym funkcją `generate_computer_move` z punktu (l). W tym drugim przypadku wydrukuj też odpowiednią wiadomość, np. `'Computer moved to ...'`.
#
# - Wykonaj na planszy `bo` literą `let` ruch `m` - użyj funkcji `make_move` z punktu (h).
#
# - Wydrukuj aktualną planszę `bo` - funkcją `display_board` z punktu (b).
#
# - Sprawdź warunki: Jeśli litera `let` wygrywa - użyj funkcji `is_winner` z punktu (i-4) - to wypisz odpowiedni komentarz (np. `'Player, playing X, is the winner!!!'`) i przerwij pętlę. Jeśli nie, to sprawdź warunek, czy plansza jest całkowicie zapełniona - funkcją `is_board_full` z punktu (j) - i jeśli tak, to wydrukuj odpowiedni komentarz (np. `'The board is full - the game is a tie!'`) i przerwij pętlę. Wreszcie jeśli żaden z tych wariantów nie jest spełniony, zmień turę i literę - funkcją `change_turn_and_letter` z punktu (k).
# +
# dłuższe ćwiczenie 17m - rozwiązanie
# -
# ### 4.3. Zamiast zakończenia
# <img style = 'float: left; margin-right: 10px; margin-bottom: 10px' src = 'Images/congratulations.png'> Gratulacje!!!
#
# Dotarliśmy do końca kursu! Rozwiązaliśmy masę zadań! Ten kurs, choć "podstawowy", wcale taki podstawowy nie był - nauczyliśmy się nie tylko fundamentów składni, ale dotarliśmy do naprawdę zaawansowanych tematów - kochasz i nienawidzisz jednocześnie składnię "list comprehension", nie straszne ci indeksy kolekcji, wiesz co to rekursja, umiesz napisać algorytm sortujący i grę, z łatwością żonglujesz stringami. Poćwiczyliśmy na tuzinach zadań używane na co dzień przez każdego programistę Pythona rozwiązania, jak "list comprehension", słowniki, metody stringów, funkcje lambda. Dowiedzieliśmy się co nieco, jak pisać kod, który określony może być jako "elegancki" - "Pythonic". Zaznajomiliśmy się z myśleniem algorytmicznym. No i dowiedzieliśmy się, jakie supersamochody produkuje LEGO, kto był pradziadkiem Bilbo Bagginsa, jaki jest najtrudniejszy prosty problem w matematyce i że Einstein urodził się w piątek 🙂.
#
# Co dalej?
#
# Pamiętajmy o dwóch sentencjach łacińskich, które przyświecały temu kursowi: "quidquid discis, tibi discis", czyli "czegokolwiek się uczysz, uczysz się dla siebie", a także "repetitio est mater studiorum", czyli "powtarzanie jest matką wiedzy". Nauczyliśmy się tu naprawdę dużo. Aby tego nie zapomnieć, należy te rzeczy ćwiczyć - czy to na zadaniach, czy na praktycznych projektach. Droga do zostania programistą Pythona stoi otworem - z takim fundamentem masz bardzo mocny start.
# #### Zadania
# <img style = 'float: right; margin-left: 10px; margin-bottom: 10px' src = 'Images/feynman.webp' width = '400px'>Jeśli nie masz jeszcze dość **zadań** 🙂, ćwicz je na wielu dostępnych stronach, np.:
#
# - [Project Euler](https://projecteuler.net/),
#
# - [LeetCode](https://leetcode.com/),
#
# - [Programming Praxis](https://programmingpraxis.com/),
#
# - [Code Wars](https://www.codewars.com/),
#
# - wielu innych stronach i repozytoriach, jak [tutaj](http://pythonpracticeprojects.com/), [tutaj](http://puzzles.bostonpython.com/), [tutaj](https://github.com/blakeembrey/code-problems), czy [tutaj](https://github.com/donnemartin/interactive-coding-challenges).
#
# Wyjątkowym zbiorem wyzwań programistycznych jest ["Build your own X"](https://github.com/danistefanovic/build-your-own-x), gdzie możesz - kodując w różnych językach - samodzielnie napisać program do rozszerzonej rzeczywistości, blockchain, bazę danych, grę, czy system operacyjny! A nawet napisać swój własny język programowania! W ten sposób możesz zapoznać się z najważniejszymi zasadami tych technologii poprzez samodzielne ich kodowanie, zgodnie z dewizą Richarda Feynmana, zostawioną na jego ostatniej tablicy kredowej, "What I cannot create, I do not understand".
#
# Prawdziwą kopalnią trudniejszych problemów jest [repozytorium](https://github.com/norvig/pytudes) Petera Norviga, dyrektora badań w Google. Wiele z jego problemów pochodzi od [The Riddler](https://fivethirtyeight.com/tag/the-riddler/).
#
# Lista ta mogłaby się długo ciągnąć. Część ćwiczeń może wydawać się czasem niepraktyczna ("znowu reszta z dzielenia, kiedy ja chcę robić strony internetowe!?"), ale nie daj się zwieść - rozwiązywanie takich zadań nie tylko kształtuje umysł, ogólne myślenie logiczne i zdolność rozwiązywania problemów, tak u dzieci, jak i dorosłych, ale także przygotowuje do bardzo prawdziwych wyzwań stojących przed programistą. To też filozofia za japońską koncepcją "kata", gdzie przez wielokrotne powtarzanie czynności nabywa się w niej mistrzostwo (zob. np. [tutaj](http://codekata.com/)). Wreszcie, proces rekrutacyjny w wielu firmach technologicznych polega na rozwiązywaniu dokładnie takich ćwiczeń; świetnym przykładem jest tajny proces rekrutacyjny Google, tzw. ["Google foo.bar Challenge"](https://www.freecodecamp.org/news/the-foobar-challenge-googles-hidden-test-for-developers-ed8027c1184/).
# #### Tutoriale
# Jest mnóstwo wszelkich **tutoriali** Pythona - gość na nich często w poszukiwaniu inspiracji i eleganckich rozwiązań! Wymieńmy choćby:
#
# - [Real Python](https://realpython.com/),
#
# - [W3 Schools](https://www.w3schools.com/python/),
#
# - [ten kurs](https://www.python-course.eu/),
#
# - [Geeks for Geeks](https://www.geeksforgeeks.org/) itd.
#
# Absolutnie kluczowym portalem jest [Stack Overflow](https://stackoverflow.com/) ("stack" to stos - pamiętasz go z Lekcji 5? a problem "stack overflow" może być sposodowany np. przez bardzo głęboką rekursję), forum pytań i odpowiedzi na wszelkie tematy związane z programowaniem. Wiele czasu każdego programisty jest poświęcone na szukaniu tam odpowiedzi!
#
# Rozważ subskrypcję [Medium](https://medium.com/), gdzie znajdziesz niekończący się strumień czasem lepszych, czasem gorszych, a czasem wyśmienitych artykułów na różne tematy, m.in. te związane z Pythonem.
# #### Specjalizacja
# Zastanów się, jaka część programowania interesuje cię najbardziej i spróbuj zacząć szkolić się w tym **wybranym kierunku**. Na tym kursie poznaliśmy zręby tzw. biblioteki standardowej Pythona, ale prócz niej istnieją niezliczone biblioteki i rozwiązania specjalistyczne, przeznaczone do konkretnych celów.
#
# - Interesuje cię praca z danymi? Fundamentalna będzie `pandas`.
#
# - Chcesz zacząć przygodę z uczeniem maszynowym i sztuczną inteligencją? Prócz `pandas` konieczne będą `numpy`, `scikit-learn` i `TensorFlow`. Przeczytaj też [najlepsze wprowadzenie](https://www.amazon.com/Hands-Machine-Learning-Scikit-Learn-TensorFlow/dp/1491962291) do tematu.
#
# - Chcesz w końcu pisać te strony internetowe!? Poświęć się uczeniu `Django` i `Flask`.
#
# - Chcesz tworzyć wizualizacje i dashboardy? Zapoznaj się z `matplotlib`, `seaborn`, `Bokeh` i `Dash`.
#
# Interesuje cię tworzenie GUIs (Graphical User Interfaces), programownie Raspberry Pi, analiza danych geo-przestrzennych, bioinformatyka, kryptografia, etyczne hakerstwo, pisanie gier, obliczenia naukowe, automatyzacja codziennych zadań na komputerze... wszystko to - i dużo więcej - możesz zrobić w Pythonie. I to... _elegancko_ 😉.
| 9-10_Functions/Lekcja 9-10, Funkcje.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apache Beam Notebooks for Streaming NLP on Real-time Tweets
# In this demo we will walk through setting up a local client to gather tweets using the `tweepy` API. After that we will use the interactive runner in Apache Beam notebooks to build a pipeline to do natural language processing on tweets in real-time. One of the advantages of using the interactive runner is that we can explore the intermediate outputs for our pipeline while building the pipeline!
#
# At the end of the notebook we will turn the relevant parts of the notebook into a script where we can deploy our streaming pipeline on Cloud Dataflow.
#
# First, let us look at the script we will be using to gather our tweets and publish them to Pub/Sub.
# NoExport
# !cat tweet-setup.sh
# After installing some packages, we will run the `tweets-gatherer.py` script. This script will not be covered explicitly in the demo, but it is recommended to glance through the code and see how the Tweepy API and Pub/Sub client are being used.
#
# Note that you need to have a Twitter Developer Account to run this script. The free version of the account will suffice and you can sign up here. We need the Twitter API Consumer Key/Secret and the Twitter API Access Key/Secret for our client to be able to search and pull tweets in real time. These tweets will be published to a Pub/Sub topic in your project created by the script above.
#
# Before moving forward, insert your Twitter Developer API keys, open a terminal (File > New > Terminal) and run the command `bash tweet-setup.sh`. If you already have a Pub/Sub topic named `tweet-nlp-demo` or a BigQuery dataset named `tweet_nlp_demo` then you can ignore the corresponding error messages.
# Before we begin to build our pipeline, we need to install a couple of Python client libraries. After doing this, you should reset the notebook kernel (Kernel > Restart Kernel) so that the packages are properly picked up. It may take a few minutes to install the packages.
# NoExport
# %pip install google-cloud-translate google-cloud-language
# We will start by importing the packages that we need for the notebook. The first code block contains packages that we will need when we submit the pipeline to Dataflow, so we will want to include the code cell in the exported script. **Before running the cell, be sure to change the Project ID to your own**. The rest of the variables (`OUTPUT_DATASET`, `OUTPUT_TABLE_UNAGG`,`OUTPUT_TABLE_AGG`, and `INPUT_TOPIC`) refer to objects created within the lab.
# +
import argparse, os, json, logging
from datetime import datetime, timedelta
import json
import pandas as pd
import apache_beam as beam
from apache_beam.transforms import trigger
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options.pipeline_options import GoogleCloudOptions, PipelineOptions, SetupOptions, StandardOptions
import google.auth
from google.cloud import language_v1
from google.cloud.language_v1 import enums
from google.cloud import translate_v2 as translate
print('Beam Version:', beam.__version__)
PROJECT_ID = 'your-project-id-here' #TODO: CHANGE PROJECT ID
# Names of the BigQuery dataset/tables and the Pub/Sub topic created in the lab setup.
OUTPUT_DATASET = 'tweet_nlp_demo'
OUTPUT_TABLE_UNAGG = 'processed_tweet_data'
OUTPUT_TABLE_AGG = 'aggregated_tweet_data'
INPUT_TOPIC = "projects/{}/topics/tweet-nlp-demo".format(PROJECT_ID)
# -
# However, the next cell contains code to import the interactive runner we will use to explore the pipeline within the notebook. We do not want to include this in the final script so we will annotate it as such.
# NoExport
from apache_beam.runners.interactive import interactive_runner
import apache_beam.runners.interactive.interactive_beam as ib
# Next we define our pipeline options. Since we wish to deal with data in real-time, we will set the streaming option to `True` to ensure that the pipeline runs indefinitely. The behavior differs slightly when we wish to use the interactive runner, but we will address that in just a moment.
# +
# Setting up the Beam pipeline options.
# Setting up the Beam pipeline options.
options = PipelineOptions()
# Sets the pipeline mode to streaming, so we can stream the data from PubSub.
options.view_as(StandardOptions).streaming = True
# Sets the project to the default project in your current Google Cloud environment.
# The project will be used for creating a subscription to the PubSub topic.
# google.auth.default() returns (credentials, project_id); only the project is kept.
_, options.view_as(GoogleCloudOptions).project = google.auth.default()
# -
# Now we set up our interactive runner. Note that we're setting a capture duration of 60 seconds. Instead of waiting indefinitely for more data to come in, we will collect 60 seconds worth of data and load it into an in-memory PCollection. That way we can visualize the results one transform at a time while building our pipeline. When we run the pipeline in Dataflow, we will want to run the pipeline indefintely.
# NoExport
# Capture 60 seconds of streaming data into an in-memory PCollection so the
# interactive runner can materialize and display intermediate results.
ib.options.capture_duration = timedelta(seconds=60)
p = beam.Pipeline(interactive_runner.InteractiveRunner(), options=options)
# **DO NOT RUN THE NEXT CELL IN THE NOTEBOOK!!!** The next cell defines all of the options for running the pipeline on Dataflow and we do not want to run this in the notebook. The cell is left here (uncommented) so that it will properly be included when we run `nbconvert` after exploring our pipeline.
# Dataflow deployment options: runner, job name, GCS staging/temp buckets, region.
# Intended for the exported script only, not for interactive notebook execution.
from apache_beam.runners import DataflowRunner
options.view_as(StandardOptions).runner = 'DataflowRunner'
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.job_name = 'tweet-nlp-pipeline'
google_cloud_options.staging_location = 'gs://{}/binaries'.format(PROJECT_ID)
google_cloud_options.temp_location = 'gs://{}/temp'.format(PROJECT_ID)
google_cloud_options.region = 'us-central1'
p = beam.Pipeline(DataflowRunner(), options=options)
# Now we are ready to start building our pipeline! We start by reading in tweets from our Pub/Sub topic using the `ReadFromPubSub` connector. After that we will use the `json.loads` function to parse the incoming JSON blob containing the text of the tweet and its attributes.
# +
# So that Pandas DataFrames do not truncate data...
# None disables column-width truncation; the legacy sentinel -1 was deprecated
# in pandas 1.0 and is rejected by newer versions.
pd.set_option('display.max_colwidth', None)
# Read raw tweet messages from the Pub/Sub topic, then parse each JSON payload
# into a Python dict with json.loads.
tweets = p | 'ReadTweet' >> beam.io.gcp.pubsub.ReadFromPubSub(topic=INPUT_TOPIC) | beam.Map(json.loads)
# -
# What we did in the previous cell was add two transformations to our pipelines DAG (Directed Acyclic Graph). We have not processed any data yet! We can use `ib.show` to ingest data from our Pub/Sub topic for 60 seconds (per our `capture_duration` option above) and store the data in an in-memory PCollection, we then apply `json.loads` to the elements of the PCollection and can visualize the results via Pandas.
#
# **WARNING:** The incoming tweets are (unfiltered) tweets containing the search term "pizza". Though the search term was chosen to be as uncontroversial as possible, anything could be in these tweets. Of course, this includes possibly very offensive material.
# NoExport
# Capture data for capture_duration seconds and render the raw JSON blobs.
ib.show(tweets)
# Now we can see the JSON blobs sent to Pub/Sub by the Twitter API. However we are only going to want certain properties of the messages for our goal. Let's take the "text", "created_at" and "source" fields for each message and pack them into a dictionary. We will create a custom function `parse_fields` and apply it in our pipeline once again using `beam.Map`.
# +
def parse_fields(tweet):
    """Trim a raw tweet blob down to the fields we need.

    Extracts 'text', 'created_at' and 'source' from the first message's
    data payload and returns them as a new dict.
    """
    data = tweet['messages'][0]['data']
    return {field: data[field] for field in ('text', 'created_at', 'source')}
parsed_tweets = tweets | "Parse_Tweet" >> beam.Map(parse_fields)
# -
# Let us quickly use `ib.show` again to see the results of our parsing. Note that the output of the previous steps is still in an in-memory PCollection, so we do not have to wait a minute for data to come in through the Pub/Sub IO Connection again.
# NoExport
# Inspect the trimmed tweets (reuses the already-captured data; no new reads).
ib.show(parsed_tweets)
# Note that the dictionaries are parsed by the interactive runner so that when we visualize the data it is presented as a table. Before we move on, we can use the `ib.show_graph` to visualize our pipeline.
# NoExport
# Render the pipeline DAG built so far.
ib.show_graph(p)
# We can see the transforms (in boxes) with the cell numbers corresponding to them. In the circles between the transforms, we can see the names of the corresponding PCollections. Note that between the `ReadTweet` and the `Map(loads)` transforms the name was generated by Beam since we did not assign a name ourselves.
#
# Now we are ready to begin applying machine learning to the data. The NLP (Natural Language Processing) API only supports certain languages for sentiment analysis. So, what we will do is first use the Translation API to detect the language. We will create a Python function, `detect_language`, to call the Translation API and add it to our pipeline once again using `beam.Map`.
# +
def detect_language(tweet):
    """Annotate *tweet* with the language detected by the Translation API.

    Adds a 'language' code and the API's 'lang_confidence' score to the
    tweet dict in place, then returns it.
    """
    client = translate.Client()
    detection = client.detect_language(tweet['text'])
    tweet['language'] = detection['language']
    tweet['lang_confidence'] = detection['confidence']
    return tweet
lang_tweets = parsed_tweets | "Detect_Language" >> beam.Map(detect_language)
# -
# Let us now detect the language of our tweets. Note that we will also record the confidence in the API's predictions ('lang_confidence') for later reference.
# NoExport
# Inspect tweets with the detected language and confidence attached.
ib.show(lang_tweets)
# Now we are ready to perform sentiment analysis on our tweets. We will invoke the NLP API to analyze the sentiment of tweets involving the term "pizza". Note that the translation of "pizza" is "pizza" in many languages, including French, German, Italian, Portuguese, and Spanish. These are languages that are supported by the NLP API, so we will filter based off the language detected by the Translation API. In the case that we are not working with one of these languages, we will assign a `None` value to the score and magnitude fields.
#
# As in the previous steps, we will invoke the API using a function and then call the function in our pipeline using `beam.Map`.
# +
def analyze_sentiment(tweet):
    """Attach sentiment 'score' and 'magnitude' fields to *tweet*.

    Sentiment analysis is only requested for languages the NLP API
    supports; tweets in any other language get None for both fields.
    The (mutated) tweet dict is returned.
    """
    client = language_v1.LanguageServiceClient()
    plain_text = enums.Document.Type.PLAIN_TEXT
    supported = ('en', 'fr', 'de', 'it', 'pt', 'es')
    if tweet['language'] in supported:
        document = {
            "content": tweet['text'],
            "type": plain_text,
            "language": tweet['language'],
        }
        response = client.analyze_sentiment(document, encoding_type=enums.EncodingType.UTF8)
        sentiment = response.document_sentiment
        tweet['score'] = sentiment.score
        tweet['magnitude'] = sentiment.magnitude
    else:
        tweet['score'] = None
        tweet['magnitude'] = None
    return tweet
analyzed_tweets = lang_tweets | "Analyze_Tweets" >> beam.Map(analyze_sentiment)
# -
# And as before, let us take a look into our processed tweets by using `ib.show`.
# NoExport
# Inspect the scored tweets, including each element's window/timestamp metadata.
ib.show(analyzed_tweets, include_window_info=True)
# We now have all of the information that we need to start performing our aggregations. However, there's one more thing we should address first. The date-timestamp (DTS) that Dataflow uses by default is the Pub/Sub publication time (when using the `ReadFromPubSub` connector). However, we would rather sort the tweets in the context of when they are published to Twitter. Above we can see that the `event_time` field and the `created_at` times are slightly different. We can replace the timestamp with the one in the `created_at` field.
# +
def custom_timestamp(tweet):
    """Replace the element's Pub/Sub publish time with the tweet's own `created_at` time."""
    event_time = datetime.strptime(tweet["created_at"], "%Y-%m-%dT%H:%M:%S").timestamp()
    return beam.window.TimestampedValue(tweet, event_time)
analyzed_tweets_w_dts = analyzed_tweets | 'CustomTimestamp' >> beam.Map(custom_timestamp)
# -
# NoExport
ib.show(analyzed_tweets_w_dts, include_window_info=True)  # verify event times now match created_at
# In our example here we will group our data into sliding windows of length 30 seconds and starting every 10 seconds. We do this by using the `beam.WindowInto` transform and specifying the window type, length, and offset using `beam.window.SlidingWindows`.
windowed_tweets = analyzed_tweets_w_dts | "Window" >> beam.WindowInto(beam.window.SlidingWindows(30, 10))  # 30s windows, a new one every 10s
# What does this actually do to our data in our PCollection? The best thing to do here is go ahead and take a peek into the output of the pipeline up to this point using `ib.show`. We will set the `include_window_info` flag to `True` so that we can peek into how windows are assigned.
# NoExport
ib.show(windowed_tweets, include_window_info=True)
# Did you notice something above? Every tweet is now triplicated, with one entry for each window it belongs to. Another thing to notice is that we have simply *assigned* the windows at this point, the data has not been grouped into windows yet.
#
# We want to measure sentiment over time depending on the source of the tweet. To do this, let us create a "key-value" pair for each tweet. Strictly speaking, we do not have a key-value pair construction in Python, but Beam will treat the first value of an ordered pair as a "key" and the second value of the ordered pair as the "value".
#
# The key will be the source of the tweet and the value will be a dictionary of the score and magnitude of the tweet. We will be using both of these data points in the next transform.
#
# We follow a similar pattern from before: we create a Python function to perform our element-wise computation. However you may notice something new here. We `yield` instead of `return` at the end of our function. We do this because we want to return a generator instead of a single element. But why? Note that `create_source_key` does not return anything in the case that we did not assign a score above! So we either return nothing or a generator with a single element. We then add the transform to the pipeline using `beam.FlatMap`. `FlatMap` is perfect for any non-1:1 transform such as `create_source_key`; `FlatMap` expects the function being applied to return a generator and it will manage cycling through the generator when the PCollection is passed to the next transform.
# +
def create_source_key(tweet):
    """Yield a (source, {'score', 'magnitude'}) key-value pair for analyzed tweets.

    Tweets in unsupported languages were assigned score/magnitude of None
    upstream and are dropped here (the generator yields nothing for them).

    Bug fix: the original truthiness test (`if tweet['score']:`) also dropped
    tweets with a legitimate neutral sentiment score of 0.0; comparing against
    None keeps those tweets in the aggregation.
    """
    if tweet['score'] is not None:
        yield (tweet['source'], {'score': tweet['score'], 'magnitude': tweet['magnitude']})
# FlatMap because create_source_key yields zero or one pairs per tweet.
prepped_tweets = windowed_tweets | "Create_Source_Key" >> beam.FlatMap(create_source_key)
# -
# NoExport
ib.show(prepped_tweets)
# Now we are ready to perform our aggregation. We will combine a weighted average of scores per window and per source. We will use the magnitude as our weight for the weighted average. However, there is not a built-in transform for performing this task!
#
# We will create our own custom combiner by extending `beam.CombineFn`. We need to define four functions when extending `beam.CombineFn` to create our custom combiner:
# 1. `create_accumulator`: We initialize the information we will be passing from node to node. In our case we have an ordered pair (sum, count) where sum is the running sum of weighted scores.
# 2. `add_input`: When we wish to include a new data point, how is it incorporated? We will add the magnitude times the score to the sum and increment the count by 1.
# 3. `merge_accumulators`: We will be computing the accumulators where they live in the cluster, what do we do when we need to shuffle data for the final aggregation? This is why we are passing ordered pairs instead of averages, we can simply combine the sums and the counts.
# 4. `extract_output`: This is the function that computes the final output. We finally combine our final weighted average by dividing the sum by the count. However, we need to anticipate the case that the count is 0 (as initially set). In this case, we will set the score to be `NaN`.
#
# Once we have created our custom combiner, we can apply it in our pipeline by calling `beam.CombinePerKey`.
# +
class WeightedAverageFn(beam.CombineFn):
    """Combiner producing a magnitude-weighted average of sentiment scores.

    The accumulator is a (weighted_sum, count) pair so that partial results
    can be merged associatively across workers before the final division.
    """
    def create_accumulator(self):
        # (running weighted sum, number of inputs folded in)
        return (0.0, 0)
    def add_input(self, sum_count, input):
        # Renamed local from `sum` to `total` so the builtin sum() is not shadowed.
        total, count = sum_count
        return total + input['score'] * input['magnitude'], count + 1
    def merge_accumulators(self, accumulators):
        totals, counts = zip(*accumulators)
        return sum(totals), sum(counts)
    def extract_output(self, sum_count):
        total, count = sum_count
        # Guard the empty accumulator (count == 0) to avoid ZeroDivisionError.
        return {'score': total / count, 'count': count} if count else {'score': float('NaN'), 'count': 0}
aggregated_tweets = prepped_tweets | "Aggregate_Weighted_Score" >> beam.CombinePerKey(WeightedAverageFn())
# -
# Let us take a quick peek at the output of our aggregations
# NoExport
ib.show(aggregated_tweets, include_window_info=True)  # elements are (source, {'score', 'count'}) pairs
# We're almost there! Let us just clean up our output to put it into a more convenient form for loading into BigQuery.
# +
def parse_aggregation(agg_tweets):
    """Flatten a (source, {'score', 'count'}) pair into one flat dict row for BigQuery."""
    source, stats = agg_tweets
    return {
        'source': source,
        'score': stats['score'],
        'count': stats['count'],
    }
# Flatten the keyed aggregates into BigQuery-ready rows.
parsed_aggregated_tweets = aggregated_tweets | "Parse_Aggregated_Results" >> beam.Map(parse_aggregation)
# -
# NoExport
ib.show(parsed_aggregated_tweets,include_window_info=True)
# We have created all of the transforms for our pipeline and we are ready to start analyzing and processing the entire real-time stream (versus working with a small in-memory PCollection). We will wrap up by defining two transforms to load data into BigQuery. We will load the aggregated tweet data (`parsed_aggregated_tweets`) and the unaggregated, analyzed tweets to a different table (`analyzed_tweets`). Keeping the unaggregated, analyzed tweets will allow us to go back and further analyze the individual tweets if another question arises without having to reprocess. Of course, we are paying to store the tweets in BigQuery, but this is much cheaper than having to reprocess.
# +
# Destination table for the unaggregated, per-tweet sentiment records.
table_spec_unagg = bigquery.TableReference(
    projectId = PROJECT_ID,
    datasetId = OUTPUT_DATASET,
    tableId= OUTPUT_TABLE_UNAGG)
table_schema_unagg ='text:STRING, created_at:TIMESTAMP, source:STRING, language:STRING, lang_confidence:FLOAT64, score:FLOAT64, magnitude:FLOAT64'
# WRITE_APPEND keeps history across pipeline runs; CREATE_IF_NEEDED builds the table on first run.
bq_output_unagg = analyzed_tweets | 'WriteToBQ_Unagg'>> beam.io.WriteToBigQuery(table_spec_unagg,
    schema=table_schema_unagg,
    write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
    create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
# Destination table for the per-window, per-source weighted averages.
table_spec_agg = bigquery.TableReference(
    projectId = PROJECT_ID,
    datasetId = OUTPUT_DATASET,
    tableId= OUTPUT_TABLE_AGG)
# NOTE(review): the schema declares window_start, but parse_aggregation does not
# emit that field, so rows will land with NULL window_start -- confirm intended.
table_schema_agg ='source:STRING, score:FLOAT64, count:INT64, window_start:TIMESTAMP'
bq_output_agg = parsed_aggregated_tweets | 'WriteToBQ_Agg'>> beam.io.WriteToBigQuery(table_spec_agg,
    schema=table_schema_agg,
    write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
    create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
# -
# Now we can finally go back and look at our completed graph. Note that by applying `bq_output_unagg` to `analyzed_tweets` we have created a branch in the pipeline.
# NoExport
ib.show_graph(p)
# Everything is ready for deploying to Dataflow! We will use the `nbconvert` tool to export this Jupyter Notebook into a Python script, so we can execute the script in other environments without having to install a tool to run notebooks. The cells that were flagged as `NoExport` will not be included in the script. These were cells that used the interactive runner or cells used to work within the notebook environment that we don't need when submitting to Dataflow.
#
# The final cell of the notebook includes the `p.run()` call that we need to execute the pipeline on Dataflow. You do not need to run that cell within the notebook.
# NoExport
# !jupyter nbconvert --to script --RegexRemovePreprocessor.patterns="['# NoExport']" TweetPipeline.ipynb
# Let us go ahead and submit the job to Dataflow! We will do this by executing the Python script we just created. After you run the cell be sure to check out the job running in Dataflow and the output in your BigQuery dataset.
# NoExport
# !pip install apache_beam google-cloud-language google-cloud-translate google-apitools
# !echo "google-cloud-translate==2.0.1" > requirements.txt
# !python3 TweetPipeline.py --save_main_session --requirements_file requirements.txt
# Don't run this cell within the notebook!
# Raise log verbosity so Dataflow job-submission messages are visible.
logging.getLogger().setLevel(logging.INFO)
p.run()  # executes the pipeline; intended for the exported script, not this notebook
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| courses/data-engineering/demos/tweet_nlp_beam_notebook/TweetPipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Strings
#
# Strings in Python are shown as the variable type **str**. You can define a string with either double quotes **"** or single quotes '. If the string you are creating actually has one of these two values in it, then you need to be careful to assure your code doesn't give an error.
my_string = 'this is a string!'          # single quotes
my_string2 = "this is also a string!!!"  # double quotes work the same way
# You can also include a \ in your string to be able to include one of these quotes:
# The line below raises a SyntaxError: the apostrophe in "Simon's" closes the
# single-quoted string early. It is left commented out so this file still parses.
# this_string = 'Simon's skateboard is in the garage.'
# print(this_string)
# Escaping the apostrophe with a backslash fixes it:
this_string = 'Simon\'s skateboard is in the garage.'
print(this_string)
# The color highlighting is also an indication of the error you have in your string in this second case. There are a number of other operations you can use with strings as well. In this video you saw a few:
# +
first_word = 'Hello '   # note the trailing space
second_word = 'There'
print(first_word + second_word)        # concatenation: 'Hello There'
print(first_word + ' ' + second_word)  # adds another space: 'Hello  There'
print(first_word * 5)                  # repetition: 'Hello ' five times
print(len(first_word))                 # 6 -- the trailing space counts
# -
# Unlike the other data types you have seen so far, you can also index into strings, but you will see more on this soon! For now, here is a small example. Notice Python uses 0 indexing - we will discuss this later in this lesson in detail.
first_word[0]  # 'H' -- indexing starts at 0
first_word[1]  # 'e'
# ## The len() function
# len() is a built-in Python function that returns the length of an object, like a string. The length of a string is the number of characters in the string. This will always be an integer.
#
# There is an example above, but here's another one:
# len() returns ints; dividing two ints with / yields a float (2.5 here).
print(len("ababa") / len("ab"))
# You know what the data types are for len("ababa") and len("ab"). Notice the data type of their resulting quotient here
# ## Quiz: Fix the Quote
# The line of code in the following quiz will cause a SyntaxError, thanks to the misuse of quotation marks. First run it with Test Run to view the error message. Then resolve the problem so that the quote (from Henry Ford) is correctly assigned to the variable ford_quote.
# Fixed: the inner apostrophes are escaped with backslashes so the
# single-quoted string no longer terminates early (this was a SyntaxError).
ford_quote = 'Whether you think you can, or you think you can\'t--you\'re right.'
# +
# Correction
# -
# ### Operators and Strings
#
#
# We’ve already seen that the type of objects will affect how operators work on them. What will be the output of this code?
coconut_count = "34"  # note: these are strings, not ints
mango_count = "15"
# + on strings concatenates, so this is "3415", not 49.
tropical_fruit_count = coconut_count + mango_count
#print(tropical_fruit_count)
# ## Quiz: Write a Server Log Message
# In this programming quiz, you’re going to use what you’ve learned about strings to write a logging message for a server.
#
# You’ll be provided with example data for a user, the time of their visit and the site they accessed. You should use the variables provided and the techniques you’ve learned to print a log message like this one (with the username, url, and timestamp replaced with values from the appropriate variables):
#
# Yogesh accessed the site **http://petshop.com/pets/reptiles/pythons** at 16:20.
#
# Use the Test Run button to see your results as you work on coding this piece by piece.
# +
username = "Kinari"
timestamp = "04:50"
url = "http://petshop.com/pets/mammals/cats"
# Solution: assemble the log message by concatenating the variables.
# Expected format: "Yogesh accessed the site http://... at 16:20."
log_message = username + " accessed the site " + url + " at " + timestamp + "."
print(log_message)
# -
# ## Quiz: len()
# Use string concatenation and the len() function to find the length of a certain movie star's actual full name. Store that length in the name_length variable. Don't forget that there are spaces in between the different parts of a name!
# +
given_name = "William"
middle_names = "Bradley"
family_name = "Pitt"
# Solution: join the name parts with spaces, then count every character
# (including the two separating spaces). This line was previously the
# incomplete `name_length = #todo`, a SyntaxError.
name_length = len(given_name + " " + middle_names + " " + family_name)
# Now we check to make sure that the name fits within the driving license character limit
# Nothing you need to do here
driving_license_character_limit = 28
print(name_length <= driving_license_character_limit)
# -
# ### len and Integers
#
#
# We've just used the len function to find the length of strings. What does the len function return when we give it the integer 835 instead of a string?
| SECTION 1/EMDI_LECTURE_6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
# %config InlineBackend.figure_format ='retina'
# +
# Log-spaced-looking sample of y/L values for the log-log comparison plot.
x = np.linspace(1E-2, 1E1, int(1E3))
# Per the plot labels: y2 is E(y/L) and y1 its 1/(2*pi) * y^-2 form.
y1 = lambda y: 1 /(2 * np.pi) * y**(-2)
y2 = lambda y: 2 / np.pi * np.arctan(1/(2 * y * (2 + 4*y**2)**(1/2)))
plt.figure(dpi=200, figsize=(3, 3))
plt.loglog(x, y2(x), 'k-', label=r'$E(y/L)$')
plt.loglog(x, y1(x), 'r--', label=r'$\frac{1}{2 \pi} E_0 L^2 y^{-2}$')
plt.loglog(x, np.ones(len(x)), 'b--', label=r'$E_0$')  # reference level E/E0 = 1
plt.xlabel(r'$y/L$')
plt.ylabel(r'$E/E_0$')
plt.xlim(1E-2, 1E1)
plt.ylim(1E-2, 3E0)
plt.legend()
plt.show()
# +
E0 = 1  # normalization constant
b = 1   # geometry parameter appearing in all three expressions
# Three candidate expressions for E(y)/E0 compared on the same axes.
E1 = lambda y: 2 / np.pi * E0 * np.arctan(b**2 / (2 * y * (2 * b**2 + 4 * y**2)**(1/2)))
E2 = lambda y: E0 * (4/np.pi * np.arctan((1 + b**2 / (2 * y**2))**(1/2)) - 1)
E3 = lambda y: 2 / np.pi * E0 * np.arctan(b**2 / (y * (y**2 + b**2)**(1/2)))
x = np.linspace(1E-2, 1E1, int(1E3))
plt.figure(dpi=200, figsize=(3, 3))
#for k in np.linspace(0.1,3, 10):
plt.loglog(x, E1(x), 'k-', label='E1')
plt.loglog(x, E2(x), 'b--', label='E2')
plt.loglog(x, E3(x), 'r-', label='E3')
plt.xlabel(r'$y/L$')
plt.ylabel(r'$E/E_0$')
plt.xlim(1E-2, 1E1)
plt.ylim(1E-2, 1E1)
plt.legend()
plt.show()
# -
2/np.pi  # quick check of the 2/pi prefactor (~0.6366)
| src/scratch.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# # Condicionales
#
# #### con la palabra reservada `if`
# En Julia, la sintaxis
#
# ```julia
# if *condición 1*
# *opción 1*
# elseif *condición 2*
# *opción 2*
# else
# *opción 3*
# end
# ```
#
# nos permite evaluar condicionalmente alguna de nuestras opciones.
#
# Por ejemplo, podríamos querer implementar la prueba FizzBuzz: dado un número, N, imprime "Fizz" si N es divisible por 3, "Buzz" si N es divisible por 5 y "FizzBuzz" si N es divisible por 3 y 5. De otra manera, solo imprime el número. Ingresa tu opción para `N` aquí:
N = 17
# Check divisibility by both 3 and 5 first, otherwise "FizzBuzz" would never print.
if (N % 3 == 0) && (N % 5 == 0) # `&&` means "AND"; % computes the remainder after division
    println("FizzBuzz")
elseif N % 3 == 0
    println("Fizz")
elseif N % 5 == 0
    println("Buzz")
else
    println(N)
end
# #### con operadores ternarios
#
# Para este último bloque, podríamos usar en su lugar el operador ternario con la sintaxis:
#
# ```julia
# a ? b : c
# ```
#
# que es igual a
#
# ```julia
# if a
# b
# else
# c
# end
# ```
# Ahora, digamos que queremos regresar el más grande de dos números. Da los valores para `x` y `y` aquí:
x = 15
y = 10
# Usando las palabras reservadas `if` y `else`, podríamos escribir:
if x > y
    x
else
    y
end
# y como operador ternario, el condicional se ve así:
(x > y) ? x : y  # same result as the if/else above, in one expression
# #### con evaluación en cortocircuito
#
# Ya hemos visto expresiones con la sintaxis
#
# ```julia
# a && b
# ```
# que regresa `true` si ambos `a` y `b` son `true`. Por supuesto, si `a` es `false`, Julia ni siquiera necesita saber el valor de `b` para determinar que el resultado de todo será `false`. Así que Julia ni siquiera necesita verificar qué es `b`; puede solo "poner en cortocircuito" y regresar `false` de inmediato. El segundo argumento `b` podría ser una expresión más complicada como una llamada a una función con un efecto secundario, en cuyo caso ni siquiera será llamado:
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
typeof(true)  # Bool
# -
true && (println("hi"); true)  # left side is true, so the right side runs
true && (println("hi"); true)
# Por el otro lado, si `a` es `true`, Julia sabe que puede solo regresar el valor de `b` como la expresión en general. ¡Esto significa que `b` no necesita necesariamente evaluar a `true` o `false`! Incluso podría ser que `b` fuera un error:
(x > 0) && error("x cannot be greater than 0")  # right side raises when x > 0
# De manera similar, verifica el operador `||`, que también usa evaluación de cortocircuito para realizar la operación *or*.
true || println("hi")   # left is true, so the right side never runs
# y...
false || println("hi")  # left is false, so the right side runs
# ### Ejercicios
#
# #### 5.1
# Escribe un enunciado condicional que imprima un número si el número es par y la cadena "odd" si el número es impar.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Ingresa tu respuesta...
numero = 10
# Even numbers print the number itself; odd numbers print "odd".
if iseven(numero)
    println(numero)
else
    println("odd")
end
# -
# #### 5.2
# Escribe nuevamente el código del ejercicio 5.1, pero esta vez utiliza un operador ternario.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
(numero % 2 == 0) ? numero : println("odd")  # ternary version of exercise 5.1
| notebooks/old_jupyter/05.Condicionales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# A,B,C,D = map(int, input().split())
# a = []
# for i in range(N):
# a.append((input()))
# -
N = int(input())  # number of values to read
numbers = list(map(int, input().split()))
numbers  # echo the parsed list
# +
# Scratch attempt: remove i whenever some pairwise XOR reproduces another
# element. Fixes: the dangling `numbers.` line was a SyntaxError and has been
# removed; the loops now iterate over copies so removing from `numbers` while
# looping is safe, and the remove is guarded so i is only removed once.
for i in list(numbers):
    for j in list(numbers):
        x = i ^ j
        if x in numbers and i in numbers:
            numbers.remove(i)
# -
def foo(x,y, numbers):
    """Scratch recursion: try to pair off elements via XOR chains.

    NOTE(review): the recursive call's return value is discarded, so `answer`
    can only be "Yes" when `numbers` is already empty on entry; otherwise 0 is
    returned. Also, each match removes three elements (x, y and i) and
    `remove` raises ValueError if any is absent -- confirm intended behavior.
    """
    answer = 0
    if len(numbers) == 0:
        answer = "Yes"
        return answer
    for i in numbers:  # NOTE(review): `numbers` is mutated while being iterated
        if x ^ i == y:
            numbers.remove(x)
            numbers.remove(y)
            numbers.remove(i)
            foo(y,i,numbers)  # result ignored
        else:
            continue
    return answer
foo(1,2,numbers)  # drive with the first two values; mutates `numbers`
# +
import sys
N = int(input())  # number of values
numbers = list(map(int, input().split()))
# NOTE(review): this aliases the same list object -- mutations made inside
# foo() are visible through numbers_2 as well; use numbers.copy() for a copy.
numbers_2 = numbers
def foo(x,y, numbers):
    # Second attempt: prints "Yes"/"No" and terminates the process directly.
    if x in numbers:
        numbers.remove(x)
        # NOTE(review): remove(y) raises ValueError if y is absent -- confirm
        # the inputs guarantee x and y appear together.
        numbers.remove(y)
    if len(numbers) == 0:
        print("Yes")
        sys.exit()
    for i in numbers:
        if x ^ i == y:
            foo(y,i,numbers)
        else:
            continue
    print("No")
    sys.exit()
# Try every ordered pair of distinct positions as the starting pair.
# NOTE(review): index() compares the first occurrences, so duplicate values
# are treated as the same position -- TODO confirm.
for i in numbers_2:
    for j in numbers_2:
        if not numbers_2.index(i) == numbers_2.index(j):
            foo(i,j,numbers_2)
# -
1  # scratch cell; bare expression with no effect
| Grand_contest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cloud Level 2 Subsetter (L2SS) API
# This will demonstrate how to subset swath/L2 data with the data and services hosted on the cloud.
#
# ## Before you start (UAT login)
# Before you begin this tutorial, make sure you have an account in the Earthdata Login UAT environment, which
# will be used for this notebook by visiting [https://uat.urs.earthdata.nasa.gov](https://uat.urs.earthdata.nasa.gov).
# These accounts, as all Earthdata Login accounts, are free to create and only take a moment to set up.
# ## Set Up Authentication
#
# We need some boilerplate up front to log in to Earthdata Login. The function below will allow Python
# scripts to log into any Earthdata Login application programmatically. To avoid being prompted for
# credentials every time you run and also allow clients such as curl to log in, you can add the following
# to a `.netrc` (`_netrc` on Windows) file in your home directory:
#
# ```
# machine uat.urs.earthdata.nasa.gov
# login <your username>
# password <your password>
# ```
#
# Make sure that this file is only readable by the current user or you will receive an error stating
# "netrc access too permissive."
#
# `$ chmod 0600 ~/.netrc`
#
# +
from urllib import request
from http.cookiejar import CookieJar
import getpass
import netrc
def setup_earthdata_login_auth(endpoint):
    """
    Set up the request library so that it authenticates against the given Earthdata Login
    endpoint and is able to track cookies between requests. This looks in the .netrc file
    first and if no credentials are found, it prompts for them.
    Valid endpoints include:
        uat.urs.earthdata.nasa.gov - Earthdata Login UAT (Harmony's current default)
        urs.earthdata.nasa.gov - Earthdata Login production
    """
    try:
        username, _, password = netrc.netrc().authenticators(endpoint)
    except (FileNotFoundError, TypeError):
        # FileNotFound = There's no .netrc file
        # TypeError = The endpoint isn't in the netrc file, causing the above to try unpacking None
        print('Please provide your Earthdata Login credentials to allow data access')
        print('Your credentials will only be passed to %s and will not be exposed in Jupyter' % (endpoint))
        username = input('Username:')
        # Restored: this line had been mangled to `password = <PASSWORD>()`,
        # which is a SyntaxError. getpass prompts without echoing the password.
        password = getpass.getpass()
    # Install a global urllib opener that sends Basic auth to `endpoint` and
    # keeps session cookies across redirects.
    manager = request.HTTPPasswordMgrWithDefaultRealm()
    manager.add_password(None, endpoint, username, password)
    auth = request.HTTPBasicAuthHandler(manager)
    jar = CookieJar()
    processor = request.HTTPCookieProcessor(jar)
    opener = request.build_opener(auth, processor)
    request.install_opener(opener)
# -
# Now call the above function to set up Earthdata Login for subsequent requests
setup_earthdata_login_auth('uat.urs.earthdata.nasa.gov')  # UAT environment, per the note above
# ### Find a granule for subsetting
#
# Below we call out a specific file/granule on which we will use the podaac L2 subsetter. Finding this information would complicate the tutorial, but there are several other demos that show how to use the search APIs.
# ## Subset of a PO.DAAC Granule
#
# We can now build onto the root URL in order to actually perform a transformation. The first transformation is a subset of a selected granule. _At this time, this requires discovering the granule id from CMR_. That information can then be appended to the root URL and used to call Harmony with the help of the `request` library.
#
# Above we show how to find a granule id for processing.
#
# **Notes:**
# The L2 subsetter current streams the data back to the user, and does not stage data in S3 for redirects. This is functionality we will be adding over time.
# It doesn't work with URS backed files, which is coming in the next few weeks
# it only works on the show dataset, but
#
# Bounding box (degrees) used for the spatial subset request below.
bblat_max=1
bblat_min=0
bblon_max=1
bblon_min=0
# +
harmony_root = 'https://harmony.uat.earthdata.nasa.gov'
# Parameters for the OGC API - Coverages rangeset request.
bboxSubsetConfig = {
    'collection_id': 'C1234208437-POCLOUD', #Jason-1 GDR SSHA version E NetCDF
    'ogc-api-coverages_version': '1.0.0',
    'variable': 'all',
    'granuleid': 'G1234220250-POCLOUD', #JA1_GPR_2PeP001_092_20020118_182623_20020118_192236.nc
    'lat': '(' + str(bblat_min) + ':' + str(bblat_max) + ')',
    'lon': '(' + str(bblon_min) + ':' + str(bblon_max) + ')'
}
bbox_url = harmony_root+'/{collection_id}/ogc-api-coverages/{ogc-api-coverages_version}/collections/{variable}/coverage/rangeset?granuleid={granuleid}&subset=lat{lat}&subset=lon{lon}'.format(**bboxSubsetConfig)
print('Request URL', bbox_url)
# -
import shutil
# Stream the subsetted granule straight to a local NetCDF file.
with request.urlopen(bbox_url) as response, open('ogc_temp.nc', 'wb') as out_file:
    print('Content Size:', response.headers['Content-length'])
    shutil.copyfileobj(response, out_file)
print("Downloaded request to ogc_temp.nc")
import xarray as xr
ds = xr.open_dataset('ogc_temp.nc')
ds  # display the dataset summary
# +
from pandas.plotting import register_matplotlib_converters
# NOTE(review): register_matplotlib_converters is imported but never called.
ds.ssha.plot()
# -
# ## Verify the subsetting worked
#
# Bounds are defined earlier
#
# +
# Compare the subset's data extents against the requested bounding box.
lat_max = ds.lat.max()
lat_min = ds.lat.min()
lon_min = ds.lon.min()
lon_max = ds.lon.max()
if lat_max < bblat_max and lat_min > bblat_min:
    print("Successful Latitude subsetting")
else:
    # Fixed: `assert false` raised NameError; Python's constant is `False`.
    assert False, "Latitude subsetting failed"
if lon_max < bblon_max and lon_min > bblon_min:
    print("Successful Longitude subsetting")
else:
    assert False, "Longitude subsetting failed"
# -
# ## Plot swath onto a map
# +
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# NOTE(review): make_axes_locatable and np are imported but unused in this cell.
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
# Scatter the swath points on a PlateCarree map, colored by SSHA.
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.scatter(ds.lon, ds.lat, lw=2, c=ds.ssha)
plt.colorbar()
plt.clim(-0.3, 0.3)  # symmetric color limits around zero
plt.show()
# -
| notebooks/Cloud L2SS subset and plot - JH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd, numpy as np
import kendo_romania
# Read data
# One empty per-competition dict for every season 1993-2018 inclusive.
matches = {}
for season in range(1993, 2019):
    matches[season] = {}
# + active=""
# Import data
# -
# ## 2018
# ### CR
# 2018 national cup (CR): column_keys maps spreadsheet column indices to
# match fields for the two sides ('aka'/'shiro') and the referee trio.
filename='rawdata/2018/CR/CR25 - Public.xlsx'
sheetname='List of matches'
column_keys={'match_type':2,'aka':{'name':5,'hansoku':6,'point1':7,'point2':8,'point3':9},
'shiro':{'name':15,'hansoku':14,'point1':11,'point2':12,'point3':13},'outcome':10,
'shinpan':{'fukushin1':16,'shushin':17,'fukushin2':18}}
matches[2018]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3)
# ### SL
# Seminar/league (SL) results stored as per-gender tables instead of lists.
filename='rawdata/2018/SL/Prezenta SL_WKC17.xlsx'
sheetname=['F','M']
matches[2018]['SL']=kendo_romania.get_matches_from_table(filename,sheetname,5)
# ### CN
filename='rawdata/2018/CN/Event management CN25.xlsx'
sheetname='Shiai'
column_keys={'match_type':3,'aka':{'name':5,'hansoku':6,'point1':7,'point2':8,'point3':9},
'shiro':{'name':15,'hansoku':14,'point1':11,'point2':12,'point3':13},'outcome':10,
'shinpan':{'fukushin1':16,'shushin':17,'fukushin2':18}}
shift=-1
matches[2018]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
# Notebook-style spot check of one parsed match.
matches[2018]['CN'][-13]
# ## 2017
# ### CN
categories=['Individual masculin','Echipe']
filename=['rawdata/2017/CN/'+i+'.xlsx' for i in categories]
sheetname='List of matches'
column_keys={'match_type':2,'aka':{'name':5,'hansoku':6,'point1':7,'point2':8,'point3':9},
'shiro':{'name':15,'hansoku':14,'point1':11,'point2':12,'point3':13},'outcome':10,
'shinpan':{'fukushin1':16,'shushin':17,'fukushin2':18}}
shift=0
matches[2017]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Individual juniori mici','Individual juniori mari','Individual feminin']
filename=['rawdata/2017/CN/'+i+'.xlsx' for i in categories]
shift=-1
matches[2017]['CN']=matches[2017]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
# ### CR
categories=['Individual masculin']
filename=['rawdata/2017/CR/'+i+'.xlsx' for i in categories]
sheetname='List of matches'
column_keys={'match_type':2,'aka':{'name':5,'hansoku':6,'point1':7,'point2':8,'point3':9},
'shiro':{'name':15,'hansoku':14,'point1':11,'point2':12,'point3':13},'outcome':10}
shift=2
matches[2017]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Individual juniori','Individual veterani','Individual feminin']
filename=['rawdata/2017/CR/'+i+'.xlsx' for i in categories]
shift=-1
matches[2017]['CR']=matches[2017]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Echipe']
filename=['rawdata/2017/CR/'+i+'.xlsx' for i in categories]
shift=0
matches[2017]['CR']=matches[2017]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
# ### SL
filename='rawdata/2017/SL/Prezenta.xlsx'
sheetname=['F','M','J']
matches[2017]['SL']=kendo_romania.get_matches_from_table(filename,sheetname,6)
# ## 2016
# ### SL
filename='rawdata/2016/SL/Event management - stagiul 4.xlsx'
sheetname=['F','M']
matches[2016]['SL']=kendo_romania.get_matches_from_table(filename,sheetname,6)
sheetname=['J']
matches[2016]['SL']=matches[2016]['SL']+\
kendo_romania.get_matches_from_table(filename,sheetname,5)
# ### CN
categories=['Individual masculin']
filename=['rawdata/2016/CN/'+i+'.xlsx' for i in categories]
sheetname='List of matches'
column_keys={'match_type':2,'aka':{'name':5,'hansoku':6,'point1':7,'point2':8,'point3':9},
'shiro':{'name':15,'hansoku':14,'point1':11,'point2':12,'point3':13},'outcome':10}
shift=2
matches[2016]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Individual feminin']
filename=['rawdata/2016/CN/'+i+'.xlsx' for i in categories]
shift=-1
matches[2016]['CN']=matches[2016]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Echipe','Male team']
filename=['rawdata/2016/CN/'+i+'.xlsx' for i in categories]
shift=0
matches[2016]['CN']=matches[2016]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
categories=['Junior 1 individual','Junior 2 individual']
filename=['rawdata/2016/CN/'+i+'.xlsx' for i in categories]
shift=-1
matches[2016]['CN']=matches[2016]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,3,shift=shift)
# ### CR
filename='rawdata/2016/CR/Event management_CR23.2016.xlsx'
sheetname=['IF_m','IJ_m','IM_m','IS_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2016]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname=['EJ_m','ES_m']
matches[2016]['CR']=matches[2016]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
# ## 2015
# ### SL
filename='rawdata/2015/SL/Event management - stagiul 5.xlsx'
sheetname=['SF_s','SM_s']
matches[2015]['SL']=kendo_romania.get_matches_from_table(filename,sheetname,6)
# ### CN
filename='rawdata/2015/CN/Event management_CN22.2015.xlsx'
sheetname=['IF_m','IJ2_m','IM_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2015]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname='E_m'
matches[2015]['CN']=matches[2015]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
# ### CR
filename='rawdata/2015/CR/Event management_CR22.2015.xlsx'
sheetname=['IF_m','IS_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2015]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
filename='rawdata/2015/CR/Event management_CR22.2015.xlsx'
sheetname=['IJ1_s']
matches[2015]['CR']=matches[2015]['CR']+\
kendo_romania.get_matches_from_table(filename,
sheetname,skiprows=7,shift=1,nrows=9)
filename='rawdata/2015/CR/Event management_CR22.2015.xlsx'
sheetname=['IJ2_s']
matches[2015]['CR']=matches[2015]['CR']+\
kendo_romania.get_matches_from_table(filename,
sheetname,skiprows=8,shift=12,nrows=8)
matches[2015]['CR']=matches[2015]['CR']+\
kendo_romania.get_matches_from_table(filename,
sheetname,skiprows=16,shift=12,nrows=8)
sheetname=['IM_s']
column_keys={'match_type':19,'aka':{'name':20,'point1':21},
'shiro':{'name':24,'point1':23},'outcome':22}
shift=0
matches[2015]['CR']=matches[2015]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=10
matches[2015]['CR']=matches[2015]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
# ## 2014
# ### SL
filename='rawdata/2014/SL/Lista de participanti 6.xlsx'
sheetname=['SF_s','SM_s','J_s']
matches[2014]['SL']=kendo_romania.get_matches_from_table(filename,sheetname,6)
# ### CR
filename='rawdata/2014/CR/Event management_CR21.2014.xlsx'
sheetname=['IC-10_m','IC_m','IJ_m','IS_m','IF_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2014]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname=['IM_s']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=8
matches[2014]['CR']=matches[2014]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,8,shift=shift)
# ### CN
filename='rawdata/2014/CN/Event management_CN21.2014 - v2.xlsx'
sheetname=['IF_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2014]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname=['IM_s']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=19
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=29
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
sheetname=['IJ1_s']
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_table(filename,sheetname,7,shift=1,nrows=10)
sheetname=['IJ2_s']
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_table(filename,sheetname,8,shift=12,nrows=6)
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_table(filename,sheetname,14,shift=12,nrows=6)
matches[2014]['CN']=matches[2014]['CN']+\
kendo_romania.get_matches_from_table(filename,sheetname,20,shift=12,nrows=6)
# ## 2013
# ### CN
filename='rawdata/2013/CN/Event management_CN2013.xlsx'
sheetname=['IS_m','IF_m','IC_m','IJ_m','E_m','IM_m']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2013]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
# ### CR
filename='rawdata/2013/CR/Event management_CR2013.xlsx'
sheetname=['IF_meciuri','IJ_meciuri','IM_meciuri']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2013]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
# ### SL
filename='rawdata/2013/SL/Event management.xlsx'
sheetname=['E_meciuri']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2013]['SL']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname=['Schema feminin']
matches[2013]['SL']=matches[2013]['SL']+\
kendo_romania.get_matches_from_table(filename,sheetname,2,nrows=14)
sheetname=['Schema juniori']
matches[2013]['SL']=matches[2013]['SL']+\
kendo_romania.get_matches_from_table(filename,sheetname,2,nrows=12)
# ## 2012
# ### CN
filename='rawdata/2012/CN/Event management CN2012.xlsx'
sheetname=['E_meciuri','IJ_meciuri','IF_meciuri','IM_meciuri']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':6,'point1':5},'outcome':3,
'shinpan':{'fukushin1':7,'shushin':8,'fukushin2':9}}
shift=0
matches[2012]['CN']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
# ### CR
filename='rawdata/2012/CR/2012.05.05-06 - CR - Cluj.xlsx'
sheetname=['IC']
matches[2012]['CR']=kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,12,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=4)
sheetname=['IJ']
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,14,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,19,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,24,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,30,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,35,shift=1,nrows=3)
sheetname=['IF']
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,nrows=3)
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=3)
column_keys={'match_type':0,'aka':{'name':1,'point1':3},
'shiro':{'name':6,'point1':5},'outcome':4}
shift=0
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,22,shift=shift)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=6
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['ES']
column_keys={'match_type':20,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=-1
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
shift=4
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
shift=9
matches[2012]['CR']=matches[2012]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
# ## 2011
# ### CN
filename='rawdata/2011/CN/2011.11.26-27 - CN - Bucuresti_print.xlsx'
sheetname=['IJ']
matches[2011]['CN']=kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,nrows=3)
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=3)
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,23,shift=1)
sheetname=['IF']
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,nrows=3)
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=3)
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,23,shift=1,nrows=4)
column_keys={'match_type':0,'aka':{'name':1,'point1':3},
'shiro':{'name':6,'point1':5},'outcome':4}
shift=0
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,28,shift=shift)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=5
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
shift=11
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['E']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=17
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
shift=23
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
shift=29
matches[2011]['CN']=matches[2011]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
# ### CR
filename='rawdata/2011/CR/2011.04.16-17 - CR - Miercurea Ciuc.xlsx'
sheetname=['ES']
column_keys={'match_type':6,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=-1
matches[2011]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=5
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=11
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=5
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
shift=11
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['IF']
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,15,shift=1,nrows=4)
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,21,shift=1,nrows=4)
column_keys={'match_type':0,'aka':{'name':1,'point1':3},
'shiro':{'name':6,'point1':5},'outcome':4}
shift=0
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,26,shift=shift)
sheetname=['IJ']
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,16,shift=1,nrows=3)
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,21,shift=1,nrows=4)
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,27,shift=1,nrows=3)
sheetname=['IC']
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,4,shift=0,nrows=4)
sheetname=['EJ']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=0
matches[2011]['CR']=matches[2011]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,15,shift=shift)
# ## 2010
# ### CR
filename='rawdata/2010/CR/2010.03.27-28 - CR - Budeasa.xlsx'
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=5
matches[2010]['CR']=kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
shift=11
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['IF']
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,15,shift=1,nrows=4)
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,21,shift=1,nrows=4)
column_keys={'match_type':0,'aka':{'name':1,'point1':3},
'shiro':{'name':6,'point1':5},'outcome':4}
shift=0
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,26,shift=shift)
sheetname=['EJ']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=0
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,15,shift=shift)
sheetname=['IJ']
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,16,shift=1,nrows=3)
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,21,shift=1,nrows=4)
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,27,shift=1,nrows=3)
sheetname=['IC']
matches[2010]['CR']=matches[2010]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,4,shift=0,nrows=4)
# ### CN
filename='rawdata/2010/CN/2010.11.27-28 - CN - Bucuresti.xlsx'
sheetname=['IJ']
matches[2010]['CN']=kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,point_shift=0,nrows=5)
sheetname=['IC']
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,nrows=3)
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=3)
sheetname=['IF']
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,nrows=3)
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,18,shift=1,nrows=3)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=6
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
shift=12
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,4,shift=shift)
sheetname=['E']
column_keys={'match_type':15,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=-1
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
shift=5
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
shift=11
matches[2010]['CN']=matches[2010]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,5,shift=shift)
# ## 2009
# ### CN
filename='rawdata/2009/CN/2009.11.28-29 - CN - Bucuresti.xlsx'
sheetname=['IJ']
matches[2009]['CN']=kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,4,shift=0,nrows=4)
sheetname=['IF']
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,12,shift=1,point_shift=0,nrows=5)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=5
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
shift=11
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['ES']
column_keys={'match_type':1,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=-1
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=5
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
shift=11
matches[2009]['CN']=matches[2009]['CN']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,7,shift=shift)
# ### CR
filename='rawdata/2009/CR/2009.04.04 - CR - Budeasa - print.xlsx'
sheetname=['IJ']
matches[2009]['CR']=kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,12,shift=1,point_shift=0,nrows=5)
sheetname=['IF']
matches[2009]['CR']=matches[2009]['CR']+\
kendo_romania.get_matches_from_table_oneliner(filename,
sheetname,13,shift=1,point_shift=0,nrows=6)
sheetname=['IM']
column_keys={'match_type':0,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=5
matches[2009]['CR']=matches[2009]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
shift=11
matches[2009]['CR']=matches[2009]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,6,shift=shift)
sheetname=['ES']
column_keys={'match_type':1,'aka':{'name':1,'point1':2},
'shiro':{'name':5,'point1':4},'outcome':3}
shift=-1
matches[2009]['CR']=matches[2009]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,8,shift=shift)
shift=5
matches[2009]['CR']=matches[2009]['CR']+\
kendo_romania.get_matches_from_list(filename,sheetname,column_keys,8,shift=shift)
# Clean up points, matches, player names
def match_cleaner(year,match):
    """Derive (category, teams, phase) from a raw match-type label.

    `match` is either '<sheet>#<stage>' or a bare sheet code; any part
    that cannot be inferred comes back as 'Unknown'.
    """
    kind, phase = 'Unknown', 'Unknown'
    if '#' in match:
        stage0 = match.split('#')[0].lower()
        stage1 = match.split('#')[1]
        # Phase markers, checked in the original priority order.
        if 'pool' in stage1:
            phase = 'Pool'
        if 'Pool' in stage1:
            phase = 'Pool'
        elif 'prel' in stage1 or 'Prel' in stage1 or 'layoff' in stage1:
            phase = 'Prelim.'
        elif ('- F' in stage1 or 'F -' in stage1 or 'Final' in stage1
              or 'SF' in stage1 or 'QF' in stage1):
            phase = 'Finals'
        elif stage1 in set('ABCDEFGHIJKLMNOPQRST'):
            # A bare single letter is a pool label.
            phase = 'Pool'
        # Sheet-code -> kind; first match wins (same order as before).
        for code, label in [('IS', "Senior's Individual"),
                            ('IF', "Women's Individual"),
                            ('IM', "Men's Individual"),
                            ('IC', "Children's Individual"),
                            ('IJ', "Junior's Individual"),
                            ('EJ', "Junior's Team"),
                            ('EF', "Men's Team"),
                            ('ES', "Senior's Team")]:
            if code in stage1:
                kind = label
                break
        # Long-form labels in stage0 override; last match wins.
        # NOTE(review): 'individual juniori'/'individual feminin' map to
        # *Team* kinds here — looks intentional for those sheets, but confirm.
        for code, label in [('individual masculin.', "Men's Individual"),
                            ('echipe.', "Mixed Team"),
                            ('individual juniori', "Junior's Team"),
                            ('individual feminin', "Junior's Team"),
                            ('individual veterani', "Senior's Team"),
                            ('male team', "Men's Team"),
                            ('junior 1 individual', "Junior's Individual"),
                            ('junior 2 individual', "Junior's Individual")]:
            if code in stage0:
                kind = label
    else:
        # Bare sheet codes used by the table-style sources.
        kind = {'F': "Women's Individual",
                'M': "Men's Individual",
                'J': "Junior's Individual",
                'SF_s': "Women's Individual",
                'SM_s': "Men's Individual",
                'J_s': "Junior's Individual"}.get(match, kind)
    if kind == 'Unknown':
        category = 'Unknown'
        teams = 'Unknown'
    else:
        category, teams = kind.split(' ')
        category = category[:-2]  # strip the "'s" suffix
    # Before 2014 (and again in 2018) the senior bracket was the men's one.
    if year < 2014 or year in [2018]:
        category = category.replace('Senior', 'Men')
    return category, teams, phase
# Load names
# Federation roster: downstream loops read the 'name', 'club' and 'year'
# columns (see the name_linker / name_abbr2club construction further down).
members=pd.read_csv('data/members_base.csv')
members.head()
# +
# Raw roster spellings that need an explicit canonical short form
# ('Surname I.' with optional '(CLUB)').
# FIX: removed the duplicated 'Crăciun (Tamang) Sujata' key (it was listed
# twice with the identical value; the second literal silently overwrote the
# first in the dict).
name_exceptions={'Atanasovski':'<NAME>. (MAC)',
'Dobrovicescu (SON)':'Dobrovicescu T. (SON)',
'Ianăș':'Ianăș F.',
'Crăciun (Tamang) Sujata':'Crăciun S.',
'Dinu (Ioniță) Claudia-Andreea':'<NAME>.',
'Arabadjiyski': 'Arabadjiyski A.',
'Mandia':'Mandia F.',
'Stanev':'Stanev A.',
'Mochalov':'Mochalov O.',
'Sozzi':'Sozzi A.',
'Crăciunel':'Crăciunel I.',
'Craciunel':'Crăciunel I.',
'Sagaev':'Sagaev L.',
'Buzás':'Búzás C.',
'Csala':'Csala D.',
'Dimitrov':'D<NAME>.',
'Józsa':'Józsa L.',
'Creangă':'Creangă A.',
'Duțescu':'Duțescu M.',
'Furtună':'Furtună G.',
'Gârbea':'Gârbea I.',
'Stupu':'Stupu I.',
'Mahika-Voiconi':'Mahika-Voiconi S.',
'Mahika':'Mahika-Voiconi S.',
'Stanciu':'Stanciu F.',
'Vrânceanu':'Vrânceanu R.',
'Wolfs':'Wolfs J.',
'Ducarme':'Ducarme A.',
'Sbârcea':'Sbârcea B.',
'Mocian':'Mocian A.',
'Hatvani':'Hatvani L.',
'Dusan':'Dusan N.',
'Borota':'Borota V.',
'Tsushima':'Tsushima K.',
'Tráser':'Tráser T.',
'Colțea':'Colțea A.',
'Brîcov':'Brîcov A.',
'Yamamoto':'Yamamoto M.',
'Crăciun':'Crăciun D.'}
# Exact strings that are NOT player names (club names, sheet headers,
# score annotations...). NOTE(review): contains a few duplicate entries
# ('—', 'Victorii:') — harmless for membership tests.
redflags_names=['-','—','—',np.nan,'. ()','— ','- -.','- -. (-)',
'Kashi','Sankon','București','Victorii:','Sakura','Taiken','Ikada','Sonkei','CRK','Museido',
'Ichimon','Bushi Tokukai 1','Competitori – Shiai-sha','Echipa - roşu','Numele şi prenumele',
'Victorii:','Victorii: 0','Victorii: 1','Victorii: 2','Victorii: 3','Victorii: 4',
'Victorii: 5','?','Kyobukan','2/5','2/6','3/8','Finala','Kyobukan (0/0/0)','―',
'(clasament final după meci de baraj)','CRK (Bucuresti)','Kaybukan','Isshin (Cluj)',
'Ikada (Bucureşti)','Kyobukan (Braşov)','Puncte:','KASHI','Budoshin','Isshin',
'— (—)','4. B.','4. Baraj: Stupu M - Hostina','4. Baraj: Moise KM - Korenschi M',
'Bushi Tokukai (2/8/17)','CRK 2 (1/6/14)', 'CRK 2','CRK 1','Loc I.:',
'Bushi Tokukai 2 (M Ciuc)','Echipa suport']
# Substrings whose presence anywhere in a candidate name disqualifies it.
redflags_names2=['Bushi Tokukai','Eliminatoriu','finala','Finala','Fianala','Ikada','Ichimon','Pool',
'Locul ','Lotul ','Loc ','Grupa ','Isshin','Meciul ','Victorii:']
# Alternative abbreviations / misspellings folded onto one canonical
# 'Surname I.' form. NOTE(review): several keys are redacted '<NAME>.'
# placeholders, so duplicate keys collapse in this literal (later wins) —
# restore the real names before relying on those entries.
name_equals={'<NAME>.':'<NAME>.',
'Ghinet C.':'Ghineț C.',
'Domniț<NAME>.':'Domniță M.',
'Garbea I.':'Gârbea I.',
'Ah-hu W.':'Ah-hu S.',
'<NAME>.':'Horváth M.',
'<NAME>.':'Ioniț<NAME>.',
'Medvedschi I.':'Medvețchi I.',
'Mahika S.':'Mahika-Voiconi S.',
'Mate L.':'Máté L.',
'Hentea L.':'Hentea A.',
'Stupu I.':'Stupu A.',
'Ah-Hu S.':'Ah-hu S.',
'<NAME>.':'<NAME>.',
'Angelescu M.':'Angelescu M.',
'Apostu D.':'Apostu T.',
'Brâcov A.':'Brîcov A.',
'Catoriu D.':'Cantoriu D.',
'Călina A.':'Călina C.',
'Buzás C.':'Búzás C.',
'Korenshi E.':'Korenschi E.',
'Pleșa R.':'Pleșea R.',
'Galos A.':'Galoș A.',
'<NAME>.':'<NAME>.',
'<NAME>.':'Györfi S.',
'Ghineț G.':'Ghineț C.',
'Hostina E.':'Hoștină E.',
'Hostină E.':'Hoștină E.',
'Ianăs F.':'Ianăș F.',
'Ianas F.':'Ianăș F.',
'Lacatus M.':'Lăcătuș M.',
'Máthé L.':'Máté L.',
'Burinaru A.':'Burinaru Al.',
'Nastase M.':'Năstase E.',
'Oprisan A.':'Oprișan A.',
'Pârlea A.':'Pîrlea A.',
'S<NAME>.':'Sabău D.',
'Spriu C.':'Spiru C.',
'Bíró S.':'Biró S.',
'Stănculascu C.':'Stănculescu C.',
'Vrânceanu M.': 'Vrânceanu L.',
'Wasicek V.':'Wasicheck W.',
'Wasicsec W.':'Wasicheck W.',
'Wasicsek W.':'Wasicheck W.',
'Zolfoghari A.':'Zolfaghari A.'}
# People who share surname + first initial: full raw spellings map to a
# disambiguated short form (extra initial letters). Checked FIRST by
# name_cleaner, before any normalization.
name_doubles={
'<NAME>':'Cristea Cr.',
'<NAME>-Ștefan':'Cristea Că.',
'<NAME>':'Sandu Mar.',
'<NAME>-Serban':'Sandu Mat.',
'<NAME>':'Georgescu An.',
'<NAME>':'Georgescu Al.',
'<NAME>':'Péter Cso.',
'<NAME>':'Péter Csa.',
'<NAME>':'<NAME>ihn.',
'<NAME>':'<NAME>.',
'Luca':'<NAME>.',
'<NAME>':'<NAME>ha.',
'<NAME>.':'<NAME>.',
'<NAME>':'<NAME>.',
'Luca Traian-Dan':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'<NAME>.',
'<NAME>':'Burinaru An.',
'<NAME>':'Dudaș F.',
'<NAME>':'Dudaș F.'}
letter_norm={'ţ':'ț','ş':'ș','Ş':'Ș'}  # legacy cedilla -> comma-below diacritics
def name_cleaner(name):
    """Reduce a raw roster spelling to the canonical 'Surname I.' short form."""
    # Hard overrides for people sharing surname + initial win outright.
    if name in name_doubles:
        return name_doubles[name]
    # Normalize Romanian diacritics before any lookup.
    for old, new in letter_norm.items():
        name = name.replace(old, new)
    # Known odd spellings are mapped explicitly; otherwise tidy the raw text.
    raw = name_exceptions.get(name, name.replace(' ', ' '))
    base = raw.split('(')[0].strip()
    pieces = base.split(' ')
    short = pieces[0] + ' ' + pieces[1][0] + '.'
    # Fold alternative abbreviations onto a single canonical spelling.
    short = name_equals.get(short, short)
    if short in name_doubles:
        # dev aid: cleaned form collides with a known double entry
        print(name, short)
    return short
# -
def name_ok(name):
    """Return True when `name` looks like a real player name.

    Rejects NaN/missing values and anything on the red-flag lists
    (club names, sheet headers, score annotations, ...).
    """
    # FIX: the original tested `name == np.nan`, which is always False
    # because NaN never compares equal; use an identity check, with the
    # string fallback still catching other float('nan') objects.
    if name is np.nan: return False
    if str(name)=='nan': return False
    if name in redflags_names:
        return False
    # Builtin all() short-circuits; no need to build a numpy array per call.
    return all(flag not in name for flag in redflags_names2)
# Standardize names
# Index every competitor: all_players maps cleaned name -> year -> set of
# raw spellings seen; all_players_r maps raw spelling -> cleaned name.
all_players={}
all_players_r={}
all_players_unsorted=set()
for year in matches:
    for competition in matches[year]:
        for match in matches[year][competition]:
            for color in ['aka','shiro']:
                name=match[color]['name']
                all_players_unsorted.add(name)
                if name_ok(name):
                    name=name_cleaner(name)
                    rname=match[color]['name']
                    if rname not in all_players_r:all_players_r[rname]=name
                    if name not in all_players: all_players[name]={}
                    if year not in all_players[name]:all_players[name][year]={'names':set()}
                    all_players[name][year]['names'].add(rname)
# Group roster full names under their cleaned short form:
# name_linker maps cleaned name -> set of full roster spellings.
name_linker={}
for i in members.index:
    name=members.loc[i]['name']
    try:
        cname=name_cleaner(name)
    except Exception:
        # FIX: the old bare `except` only printed and then fell through,
        # reusing the previous iteration's `cname` (or raising NameError on
        # the first row) and silently misfiling this member. Skip instead.
        print(name)
        continue
    if cname not in name_linker:name_linker[cname]=set()
    name_linker[cname].add(name)
# Reverse map: full roster spelling -> cleaned short name.
names_abbr={}
for name in name_linker:
    if len(name_linker[name])>1:
        #only for dev to create exceptions for duplicate person names.
        print(name,name_linker[name])
    for i in name_linker[name]:
        names_abbr[i]=name
# Per-member lookups: short name -> full name, and short name -> club by year.
names_abbr_list=[]
name_abbr2long={}
name_abbr2club={}
for i in members.index:
    name=members.loc[i]['name']
    club=members.loc[i]['club']
    year=members.loc[i]['year']
    names_abbr_list.append(names_abbr[name])
    name_abbr2long[names_abbr[name]]=name
    if names_abbr[name] not in name_abbr2club:name_abbr2club[names_abbr[name]]={}
    name_abbr2club[names_abbr[name]][year]=club
members['name_abbr']=names_abbr_list
# Index referees (shinpan): cleaned name -> list of matches officiated.
# Only matches whose BOTH competitors resolved to known players count.
all_shinpan={}
all_shinpan_r={}
all_shinpan_unsorted=set()
for year in matches:
    for competition in matches[year]:
        for match in matches[year][competition]:
            if 'shinpan' in match:
                for color in ['fukushin1','shushin','fukushin2']:
                    aka=match['aka']['name']
                    shiro=match['shiro']['name']
                    if (name_ok(aka)) and\
                    (name_ok(shiro)) and\
                    (name_cleaner(aka) in all_players) and\
                    (name_cleaner(shiro) in all_players):
                        rname=match['shinpan'][color]
                        all_shinpan_unsorted.add(rname)
                        if name_ok(rname):
                            name=name_cleaner(rname)
                            if name not in all_shinpan: all_shinpan[name]=[]
                            all_shinpan[name].append(match)
                            if rname not in all_shinpan_r:all_shinpan_r[rname]=name
# Long-name fallbacks for referees who are not in the member roster.
# NOTE(review): keys are redacted '<NAME>.' placeholders here, so duplicate
# keys collapse in this literal (later wins) — restore real names.
name_abbr2long_extends={
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>.',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>.',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'<NAME>.':'<NAME>',
'Stanev A.':'Stanev A.',
'Mochalov O.':'Mochalov O.',
'Sozzi A.':'Sozzi A.',
'<NAME>.':'<NAME>.'
}
# Fill gaps in the roster-derived map from the fallback table above.
for i in all_shinpan.keys():
    if i not in name_abbr2long:
        name_abbr2long[i]=name_abbr2long_extends[i]
# Infer clubs
#naive infer
# A player's club is parsed from the '(CLUB)' suffix of any raw spelling;
# club_equals normalizes alternative club/country codes.
redflags_clubs=['','N/A','RO1','RO2']
club_equals={'MLD':'MOL/Md',
'MOL':'MOL/Md',
'IKD':'IKA',
'HUN':'HUN/Hu',
'BUL':'BUL/Bg',
'TUR':'TUR/Tr',
'MAC':'MAC/Mc',
'MNE':'MNE/Mn',
'SRB':'SRB/Sr',
'ITA':'ITA/It',
'ISS':'ISH',
'Musso, Bg':'MUS/Bg',
'Makoto, Sr':'MAK/Sr',
'Szeged, Hu':'SZE/Hu'}
for name in all_players:
    for year in all_players[name]:
        for name_form in all_players[name][year]['names']:
            if '(' in name_form:
                club=name_form.split('(')[1].strip()[:-1]
                if club in club_equals: club=club_equals[club]
                if club not in redflags_clubs:
                    all_players[name][year]['club']=club
for name in all_players:
for year in all_players[name]:
if 'club' not in all_players[name][year]:
#more than 1 year?
years=np.sort(list(all_players[name].keys()))
if len(years)>1:
#get club from previous year
for y in range(years[0],year):
if y in all_players[name]:
if 'club' in all_players[name][y]:
all_players[name][year]['club']=all_players[name][y]['club']
#if still not found, get club from next year
if 'club' not in all_players[name][year]:
#get club from next year
for y in np.arange(years[-1],year,-1):
if y in all_players[name]:
if 'club' in all_players[name][y]:
all_players[name][year]['club']=all_players[name][y]['club']
# Infer a long display name for players absent from name_abbr2long by
# taking the longest raw name form seen, with any "(CLUB)" suffix removed.
for name in all_players:
    if name not in name_abbr2long:
        #infer using longest available name
        names={len(j):j for i in all_players[name] for j in all_players[name][i]['names']}
        if len(names)>0:
            inferred_name=names[max(names.keys())]
            if '(' in inferred_name:
                inferred_name=inferred_name[:inferred_name.find('(')-1]
            print(name,inferred_name)
            name_abbr2long[name]=inferred_name
        else:
            print(name,all_players[name])
# Second club-fill pass: prefer the registered-members table
# (name_abbr2club); otherwise fall back to the player's other years.
for name in all_players:
    years=np.sort(list(all_players[name].keys()))
    for year in all_players[name]:
        if 'club' not in all_players[name][year]:
            #get from list
            if name in name_abbr2club:
                minyear=min(name_abbr2club[name].keys())
                if year in name_abbr2club[name]:
                    all_players[name][year]['club']=name_abbr2club[name][year]
                elif year<minyear:
                    # Before first registration: assume the first known club.
                    all_players[name][year]['club']=name_abbr2club[name][minyear]
            elif len(years)>1:
                #get club from previous year
                for y in range(years[0],year):
                    if y in all_players[name]:
                        if 'club' in all_players[name][y]:
                            all_players[name][year]['club']=all_players[name][y]['club']
                #if still not found, get club from next year
                if 'club' not in all_players[name][year]:
                    #get club from next year
                    for y in np.arange(years[-1],year,-1):
                        if y in all_players[name]:
                            if 'club' in all_players[name][y]:
                                all_players[name][year]['club']=all_players[name][y]['club']
# Interpolate missing years for members
# Hand-curated club assignments for players whose club could not be
# inferred automatically; anything still unknown is marked 'XXX' and
# collected in club_errors for inspection.
clubs_manual={
    '<NAME>.':'BTK',
    'Nagy V.':'ISH',
    'Goró L.':'BTK',
    'Ghineț G.':'YUK',
    'Cioată E.':'KAS',
    'Leat M.':'IKA',
    'Perianu S.':'KNS',
    'Ah-hu S.':'ICH',
    'Preda A.':'CRK',
    'Salló Z.':'BTK',
    'András Z.':'BTK',
    'Neagu F.':'IKA',
    'Bódi Z.':'KYO',
    'Bumbu D.':'ISH',
    'Botean A.':'ISH',
    'Moldoveanu M.':'ISH',
    'Jeszenszki T.':'BTK',
    'Suru N.':'SAM',
    'Balázs S.':'BTK',
    'Perdi L.':'ISH',
    'Oprișan A.':'IKA',
    'Horváth D.':'BTK',
    'Sandache I.':'BTK',
    '<NAME>.':'KAY',
    'Angelescu M.':'SAM',
    'Bărbulescu E.':'MUS',
    'Canceu A.':'KAS',
    'Crișan E.':'ISH',
    '<NAME>.':'KAS',
    'Dumbravă L.':'ISH',
    '<NAME>.':'IKA',
    '<NAME>.':'MUS',
    'Keresztes M.':'BTK',
    'Macavei I.':'KYO',
    'Mitelea C.':'ICH',
    'Pavel A.':'IKA',
    'Pienaru S.':'ISH',
    'Szikszai M.':'BTK',
    'Tamang S.':'SAM',
    'Tiron L.':'KNS',
    'Turdean S.':'KAS',
    'Wasicheck W.':'ISH',
    'Ștefan C.':'IKA'
}
club_errors=[]
for name in all_players:
    for year in all_players[name]:
        if 'club' not in all_players[name][year]:
            #if still not found, print error, infer other way
            if name in clubs_manual:
                all_players[name][year]['club']=clubs_manual[name]
            else:
                all_players[name][year]['club']='XXX'
                club_errors.append(name)
# Notebook cell expression: shows the distinct unresolved names.
set(club_errors)
# Invert the player table: clubs[club][year] = set of player names active
# there that year.
clubs={}
for name in all_players:
    for year in all_players[name]:
        club=all_players[name][year]['club']
        if club not in clubs:clubs[club]={}
        if year not in clubs[club]:clubs[club][year]=set()
        clubs[club][year].add(name)
def outcome_cleaner(outcome):
    """Return True iff the raw outcome flag marks an encho ('E') match."""
    return outcome == 'E'
def outcome_from_points(aka, shiro):
    """Decide the match winner from the two point counts.

    Returns a (winner, difference) pair where winner is 'A' (aka wins),
    'S' (shiro wins) or 'X' (draw), and difference is the point margin
    as a string.
    """
    if aka == shiro:
        # BUG FIX: the draw branch previously returned the int 0 while the
        # other branches return strings; keep the type consistent.
        return 'X', '0'
    elif aka > shiro:
        return 'A', str(aka - shiro)
    else:
        return 'S', str(shiro - aka)
# Raw point strings containing any of these markers are table headers
# ('Puncte' = 'Points' in Romanian), not real results; discarded downstream.
redflags_points=['Puncte']
# Single-character normalisation table for raw point marks: unify the 'O'
# glyphs ('○' and OCR'd '1'), map 'I' to 'H', and drop filler characters.
_POINT_TRANS = str.maketrans({'○': 'O', 'I': 'H', '1': 'O',
                              '×': None, '–': None, '—': None, '?': None})

def point_clean1(point):
    """Normalise one raw point mark to its canonical character (may be '')."""
    # Single-pass translate replaces the original chain of str.replace calls.
    return point.translate(_POINT_TRANS)

def points_cleaner(points):
    """Parse a raw score string into (point1, point2, n_points, hansoku).

    Hansoku (penalty) markers '∆', '▲' and '(Ht)' set the hansoku flag and
    are stripped; '(victorie)' counts as a two-point win ('OO').  The two
    remaining characters are normalised via point_clean1.
    """
    hansoku = 0
    for marker in ('∆', '▲', '(Ht)'):
        if marker in points:
            hansoku = 1
            points = points.replace(marker, '')
    if '(victorie)' in points:
        points = points.replace('(victorie)', 'OO')
    points = points.strip()
    if len(points) > 2:
        # BUG FIX: the original printed this warning but left point1/point2
        # unbound, raising UnboundLocalError below.  Keep the warning and
        # fall back to the first two characters.
        print(points, 'error')
        point1, point2 = points[0], points[1]
    elif len(points) > 1:
        point1, point2 = points[0], points[1]
    elif len(points) > 0:
        point1, point2 = points[0], ''
    else:
        point1 = point2 = ''
    return point_clean1(point1), point_clean1(point2), len(points), hansoku
def club_cleaner(club):
    """Split a raw club code 'CLUB/cc' into (club, COUNTRY).

    Clubs without a '/cc' suffix are domestic and get country 'RO'.
    """
    if '/' not in club:
        return club, 'RO'
    parts = club.split('/')
    return parts[0], parts[1].upper()
# Display names for club codes (Romanian clubs) and 2-letter country codes.
# FIX: the original literal listed 'SR':'Serbia' twice; duplicate dict keys
# silently overwrite each other, so the redundant entry is removed.
pretty_clubs={'ARA':'Arashi', 'BSD':'Bushido', 'BTK':'Bushi Tokukai', 'BG':'Bulgaria',
              'CDO':'Coroan de Oțel', 'CRK':'Clubul Român de Kendo', 'HAR':'Hargita',
              'ICH':'Ichimon', 'IKA':'Ikada', 'ISH':'Ishhin', 'IT':'Italy', 'HU':'Hungary',
              'KAS':'Kashi', 'KNS':'Kenshin', 'KYO':'Kyobukan', 'MC':'Macedonia',
              'SR':'Serbia', 'MN':'Montenegro', 'MD':'Moldova', 'MUS':'Museido',
              'RON':'Ronin-do', 'SAK':'Sakura', 'SAM':'Sam-sho', 'SAN':'Sankon', 'SBK':'Sobukan',
              'SON':'Sonkei', 'TAI':'Taiken', 'TR':'Turkey', 'XXX':'Unknown',
              'YUK':'Yu-kai', 'KAY':'Kaybukan'}

def pretty_club(club, country):
    """Return a human-readable name: the country for foreign competitors,
    otherwise the club's display name."""
    if country != 'RO':
        return pretty_clubs[country]
    else:
        return pretty_clubs[club]
# Build synthetic member rows for players who appear in match data but not
# in the registered-members table, one row per year of their active span.
# NOTE(review): set(members['name_abbr'].values) is rebuilt on every loop
# iteration — it could be hoisted once before the loop.
unregistered_members=[]
for name in all_players:
    if name not in set(members['name_abbr'].values):
        years=np.sort(list(all_players[name].keys()))
        for year in range(min(years),max(years)+1):
            # For gap years, borrow the club from the last known year.
            if year in all_players[name]:
                iyear=year
            else:
                iyear=max(years)
            club,country=club_cleaner(all_players[name][iyear]['club'])
            if country=='RO':
                # Domestic but unregistered -> inactive, dan unknown (0).
                activ='Inactiv'
                dan=0
            else:
                activ=''
                dan=''
            unregistered_members.append({'name':name_abbr2long[name],
                'club':club,'active':activ,'year':year,'dan':dan,'country':country,
                'pretty_club':pretty_club(club,country)})
members['country']='RO'
members2=pd.concat([members,pd.DataFrame(unregistered_members)])
# Appears in competition but Mu DAN
# Players who fought in a given year but have no member row for that year:
# clone their earliest member row with the year replaced, so every match
# participant has a member record for the match's year.
members_mu_dan_extensions=[]
members_by_name=members2.set_index(['name'])
for year in matches:
    members_by_year=members2.set_index(['year']).loc[year]
    for competition in matches[year]:
        print(year,competition)
        for k in matches[year][competition]:
            aka=k['aka']['name']
            shiro=k['shiro']['name']
            if (name_ok(aka)) and\
               (name_ok(shiro)) and\
               (name_cleaner(aka) in all_players) and\
               (name_cleaner(shiro) in all_players):
                for a in ['aka','shiro']:
                    for h in k[a]:
                        if h=='name':
                            name=k[a][h]
                            rname=name_abbr2long[all_players_r[name]]
                            if rname not in members_by_year['name'].values:
                                # Copy the earliest row for this player.
                                dummy=members_by_name.loc[[rname]]
                                dummy=dummy[dummy['year']==min(dummy['year'])]
                                dummy=dummy.reset_index()
                                dummy['year']=year
                                members_mu_dan_extensions.append(dummy)
members3=pd.concat([members2,pd.concat(members_mu_dan_extensions)])
# 'Unnamed: 0' is the stray index column from the original CSV import.
members3=members3.drop('Unnamed: 0',axis=1).drop_duplicates()
members3.to_csv('data/members.csv')
# --- Flatten the nested matches structure into one tidy record per match --
# Each record carries year/competition, match type fields, referee names,
# and per-colour (aka/shiro) player name, club, country, dan and points.
master_matches=[]
for year in matches:
    members_by_year=members3.set_index(['year']).loc[year].drop_duplicates()
    for competition in matches[year]:
        print(year,competition)
        for k in matches[year][competition]:
            good=True
            match={'year':year,'competition':competition}
            match['match_category'],match['match_teams'],match['match_phase']=match_cleaner(year,k['match_type'])
            if 'shinpan' in k:
                for color in ['fukushin1','shushin','fukushin2']:
                    if color in k['shinpan']:
                        if k['shinpan'][color] in all_shinpan_r:
                            match[color]=name_abbr2long[all_shinpan_r[k['shinpan'][color]]]
            aka=k['aka']['name']
            shiro=k['shiro']['name']
            # Only keep matches where both fighters are known players.
            if (name_ok(aka)) and\
               (name_ok(shiro)) and\
               (name_cleaner(aka) in all_players) and\
               (name_cleaner(shiro) in all_players):
                for a in ['aka','shiro']:
                    points=''
                    for h in k[a]:
                        if h=='name':
                            name=k[a][h]
                            match[a+' name']=name_abbr2long[all_players_r[name]]
                            club, country=club_cleaner(all_players[all_players_r[name]][year]['club'])
                            match[a+' club'], match[a+' country']=club, country
                            match[a+' dan']=members_by_year.set_index(['name']).\
                                loc[match[a+' name']]['dan']
                            match[a+' pretty_club']=pretty_club(club, country)
                        else:
                            # Any non-name field contributes to the raw
                            # point string (NaN cells become '').
                            point=k[a][h]
                            if str(point)=='nan': point=''
                            points=points+point
                    # Drop header rows masquerading as results.
                    for redflag in redflags_points:
                        if redflag in points:
                            good=False
                    if good:
                        match[a+' point1'],match[a+' point2'],match[a+' points'],match[a+' hansoku']=points_cleaner(points)
            else:
                good=False
            if good:
                if 'outcome' in k:
                    match['encho']=outcome_cleaner(k['outcome'])
                else:
                    match['encho']=False
                # Winner is decided by the number of points scored per side.
                match['winner'],match['difference']=outcome_from_points(match['aka points'],match['shiro points'])
                master_matches.append(match)
data=pd.DataFrame(master_matches)
# Cleanup
# Render hansoku flags as the '∆' glyph for the exported CSV.
data['aka hansoku']=data['aka hansoku'].replace(0,'').replace(1,'Δ')
data['shiro hansoku']=data['shiro hansoku'].replace(0,'').replace(1,'Δ')
data.to_csv('data/matches.csv')
# Group by player
# Reshape the wide match table into long formats: one row per player-match
# (extended_matches), then per player-match-point (extended_points), then
# per player-match-point-referee (extended_shinpan).
# NOTE(review): `aka`/`shiro`/`p1`/`p2` are slices of another DataFrame, so
# the column assignments / inplace renames below may raise pandas
# SettingWithCopyWarning — consider .copy() on each slice.
aka=data[[i for i in data.columns if 'shiro ' not in i]]
aka.columns=[i.replace('aka ','') for i in aka.columns]
aka['color']='aka'
aka['opponent']=data['shiro name']
shiro=data[[i for i in data.columns if 'aka ' not in i]]
shiro.columns=[i.replace('shiro ','') for i in shiro.columns]
shiro['color']='shiro'
shiro['opponent']=data['aka name']
extended_matches=pd.concat([aka,shiro],axis=0).reset_index(drop=True)
extended_matches.head()
extended_matches.to_csv('data/extended_matches.csv')
p1=extended_matches[[i for i in extended_matches.columns if i!='point2']]
p2=extended_matches[[i for i in extended_matches.columns if i!='point1']]
p1.rename(columns={'point1':'point'}, inplace=True)
p2.rename(columns={'point2':'point'}, inplace=True)
extended_points=pd.concat([p1,p2],axis=0).reset_index(drop=True)
extended_points.to_csv('data/extended_points.csv')
extended_points.columns
shu=extended_points[[i for i in extended_points.columns if 'fukushin' not in i]]
shu.columns=[i.replace('shushin','shinpan') for i in shu.columns]
fk1=extended_points[[i for i in extended_points.columns if 'shushin' not in i and 'fukushin2' not in i]]
fk1.columns=[i.replace('fukushin1','shinpan') for i in fk1.columns]
fk2=extended_points[[i for i in extended_points.columns if 'shushin' not in i and 'fukushin1' not in i]]
fk2.columns=[i.replace('fukushin2','shinpan') for i in fk2.columns]
extended_shinpan=pd.concat([shu,fk1,fk2],axis=0).reset_index(drop=True)
extended_shinpan.to_csv('data/extended_shinpan.csv')
extended_shinpan.columns
extended_shinpan['club'].unique()
# Competitor statistics
# Per player: count each point type scored ('M', 'K', 'D', ...) plus the
# total number of points ('U') and the player's club.
competitors = {}
# FIX: the original iterated `data.T.iteritems()`; DataFrame.iteritems()
# was removed in pandas 2.0.  iterrows() yields the same
# (index, row-Series) pairs.
for _, row in data.iterrows():
    for a in ['aka ', 'shiro ']:
        name = row[a+'name']
        club = row[a+'club']
        if name not in competitors:
            competitors[name] = {'U': 0, 'club': club}
        for j in ['point1', 'point2']:
            point = row[a+j]
            if point != '':
                if point not in competitors[name]: competitors[name][point] = 0
                competitors[name][point] += 1
                competitors[name]['U'] += 1
data2 = pd.DataFrame(competitors)
# One row per player in the exported CSV.
data2.T.to_csv('data/competitors.csv')
| kendo romania/.ipynb_checkpoints/2. matches-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Computer Simulations - Project 1. @ ELTE
# # N-body problem of satellite formation and clustering of planetary debris inside an asteroid belt
# +
from platform import python_version
print(python_version())
# +
# #%%bash
#jupyter nbextension install --py mayavi --user # Install if missing
#jupyter nbextension enable mayavi --user --py
# +
# %%cmd
rem jupyter nbextension install --py mayavi --user # Install if missing
jupyter nbextension enable mayavi --user --py
# +
# #%matplotlib notebook
# +
import os
import random
import numpy as np
from numba import jit
from datetime import datetime
from mayavi import mlab
from tvtk.api import tvtk
mlab.init_notebook()
import seaborn as sns
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
from sklearn.cluster import MeanShift, DBSCAN, KMeans
from IPython.display import clear_output, display
# -
# ## Configure matplotlib and seaborn parameters
# +
# --- Output locations and global plotting configuration -------------------
# Windows-style relative paths for simulation output and report figures.
out = '..\\out\\'
figsave_dir = '..\\Documentation\\Report\\src\\img_src\\'
figsave_format = 'pdf'
figsave_dpi = 200
# Scale for matplotlib scatter plots to show all bodies
s_scale = 1e8
# Set axtick dimensions
major_size = 6
major_width = 1.2
minor_size = 3
minor_width = 1
mpl.rcParams['xtick.major.size'] = major_size
mpl.rcParams['xtick.major.width'] = major_width
mpl.rcParams['xtick.minor.size'] = minor_size
mpl.rcParams['xtick.minor.width'] = minor_width
mpl.rcParams['ytick.major.size'] = major_size
mpl.rcParams['ytick.major.width'] = major_width
mpl.rcParams['ytick.minor.size'] = minor_size
mpl.rcParams['ytick.minor.width'] = minor_width
# Seaborn style settings
sns.set_style({'axes.axisbelow': True,
               'axes.edgecolor': '.8',
               'axes.facecolor': 'white',
               'axes.grid': True,
               'axes.labelcolor': '.15',
               'axes.spines.bottom': True,
               'axes.spines.left': True,
               'axes.spines.right': True,
               'axes.spines.top': True,
               'figure.facecolor': 'white',
               'font.family': ['sans-serif'],
               'font.sans-serif': ['Arial',
                                   'DejaVu Sans',
                                   'Liberation Sans',
                                   'Bitstream Vera Sans',
                                   'sans-serif'],
               'grid.color': '.8',
               'grid.linestyle': '--',
               'image.cmap': 'rocket',
               'lines.solid_capstyle': 'round',
               'patch.edgecolor': 'w',
               'patch.force_edgecolor': True,
               'text.color': '.15',
               'xtick.bottom': True,
               'xtick.color': '.15',
               'xtick.direction': 'in',
               'xtick.top': True,
               'ytick.color': '.15',
               'ytick.direction': 'in',
               'ytick.left': True,
               'ytick.right': True})
# -
# ## Auxiliary functions
def save_mpl_fig(fig, fname, save_format='png', save_dpi=150, **kwargs):
    """Save a matplotlib figure with the project's dark-background defaults.

    Parameters
    ----------
    fig : matplotlib figure to save.
    fname : output path (extension optional; format given separately).
    save_format, save_dpi : forwarded to Figure.savefig.
    **kwargs : extra Figure.savefig keywords; these override the defaults
        below (e.g. facecolor='white').
    """
    # FIX: **kwargs was accepted by the original signature but never used.
    opts = {'format': save_format,
            'dpi': save_dpi,
            'facecolor': 'black',
            'bbox_inches': 'tight'}
    opts.update(kwargs)
    fig.savefig(fname, **opts)
# ## Setting parameters
# +
# [Mass in kg |
# Distance from central celestail body in AU |
# eccentricity |
# Size in AU]
# Reference data per body: [mass kg, orbital distance AU, eccentricity,
# radius AU].  Only the selected central body's mass/radius are used below.
Planets={
    'Sun': [1.989e30, 0, 0.0001, 4.649e-03],
    'Moon': [7.348e22, 0.00257, 0.0549, 1.161e-05],
    'Mercury': [3.285e23, 0.466697, 0.205630, 1.631e-05],
    'Venus': [4.867e24, 0.728213, 0.006772, 4.045e-05],
    'Earth': [5.972e24, 1.017, 0.0167086, 4.259e-05],
    'Mars': [6.39e23, 1.666, 0.0934, 2.266e-05],
    'Jupiter': [1.898e27, 5.4588, 0.0489, 4.673e-04],
    'Saturn': [5.683e26, 10.1238, 0.0565, 3.893e-04],
    'Uranus': [8.681e25, 20.11, 0.046381, 1.695e-04],
    'Neptune': [1.024e26, 30.33, 0.009456, 1.646e-04],
    'Pluto': [1.309e22, 49.305, 0.2488, 7.954e-06],
    'Halley': [2.2e14, 35.082, 0.96714, 3.68e-08]
}
# Display colours per body as RGB triples scaled to [0, 1].
Planet_Colors={
    'Sun': np.array([216, 148, 29])/255,
    'Moon': np.array([204, 198, 195])/255,
    'Mercury': np.array([186, 186, 186])/255,
    'Venus': np.array([216, 194, 153])/255,
    'Earth': np.array([45, 52, 130])/255,
    'Mars': np.array([217, 120, 62])/255,
    'Jupiter': np.array([187, 155, 99])/255,
    'Saturn': np.array([222, 181, 82])/255,
    'Uranus': np.array([201, 239, 241])/255,
    'Neptune': np.array([72, 120, 242])/255,
    'Pluto': np.array([65, 25, 20])/255,
    'Halley': np.array([0,0,0])/255
}
# -
# --- Unit conversions and physical constants ------------------------------
AU_to_km = 149597871                     # 1 AU = 149,597,871 km
y_to_h = 8760                            # 1 year = 8760 hours
GMPlusm = 4 * np.pi * np.pi              # Kepler's Third Law: G(M + m)/(4*pi^2) = 1 [AU^3/year^2]
G = 6.67408e-11                          # Gravitational constant [m^3 * kg^-1 * s^-2]
G_km = G / 1000**3 * 3600**2             # Gravitational constant [km^3 * kg^-1 * hour^-2]
G_AU = G_km / AU_to_km**3 * y_to_h**2    # Gravitational constant [AU^3 * kg^-1 * year^-2]
c_lgh = 299792.458                       # Speed of light [km/s]
c_AU = c_lgh / AU_to_km * 3600 * y_to_h  # Speed of light [AU/year]
# ## Transfer function
def transfer_function(t, width=100, cutoff=100):
    """Flat-top weighting with an exponential tail.

    Returns 1 where |t| < cutoff and norm * exp(-|t|/width) elsewhere,
    where norm = exp(cutoff/width) makes the tail join the flat region
    at |t| == cutoff.

    Parameters
    ----------
    t : array-like of float — sample positions.
    width : float — decay length of the exponential tail.
    cutoff : float — half-width of the flat (pass) region.

    Returns
    -------
    numpy.ndarray with the same length as t.
    """
    t = np.asarray(t, dtype=float)
    norm = 1 / np.e**(-1/width*np.abs(cutoff))
    # Vectorised replacement of the original element-by-element Python loop.
    return np.where(np.abs(t) < cutoff, 1.0, norm * np.exp(-np.abs(t) / width))
# +
# Quick visual sanity check of the transfer function shape.
x_min = -1000
x_max = 1000
func = np.linspace(x_min, x_max, 999)
plt.plot(func, transfer_function(func, width=500, cutoff=100))
plt.show()
# -
# ## Generating mass and size of small bodies
#
# #### Sourced from
# - <NAME>. & <NAME>. & <NAME> & <NAME>. (2002). Hidden Mass in the Asteroid Belt. Icarus. 158. 98-105. 10.1006/icar.2002.6837. doi:[10.1006/icar.2002.6837](https://doi.org/10.1006/icar.2002.6837)
#
# For C, S and M types:
# $$
# \overline{\rho_{C}} = 1.38 \frac{\text{g}}{\text{cm}^{3}} = 1380 \frac{\text{kg}}{\text{m}^{3}}
# $$
#
# $$
# \overline{\rho_{S}} = 2.27 \frac{\text{g}}{\text{cm}^{3}} = 2270 \frac{\text{kg}}{\text{m}^{3}}
# $$
#
# $$
# \overline{\rho_{T}} = 5.32 \frac{\text{g}}{\text{cm}^{3}} = 5320 \frac{\text{kg}}{\text{m}^{3}}
# $$
# +
# Number of small bodies around the central object
n_bodies = int(1e2)
# Select whether the objects' motion in the asteroid belt
# would be prograde or retrograde. In this case, prograde
# will mean counterclockwise, and retrograde will mean
# clockwise motion of the asteroid belt.
# NOTE(review): `prograde` does not appear to be read anywhere below —
# confirm whether direction selection was ever wired in.
prograde = True
# Select central object
planet = 'Jupiter'
R = Planets[planet][3]   # central body radius [AU]
M = Planets[planet][0]   # central body mass [kg]
# -
# In kg/m^3
# Mean densities for C-, S- and M(T)-type asteroids (Krasinsky et al. 2002).
rho_C = 1380
rho_S = 2270
rho_T = 5320
def gen_geom(planet='Jupiter', n_bodies=1e02):
    """Generate (mass [kg], radius [AU]) rows for the central body plus
    n_bodies randomly sized S-type asteroids.

    Row 0 holds the central body (module globals M, R); rows 1..n_bodies
    hold the small bodies.  Returns an (n_bodies+1, 2) float array.
    """
    # FIX: the default 1e02 is a float; np.zeros shapes and range() need
    # ints, so the original crashed when called with the default.
    n_bodies = int(n_bodies)
    geometries = np.zeros((n_bodies+1, 2))
    # Range of asteroid radii, in meters.
    lowest_R = 5000
    highest_R = 50000
    # Central body: [0,0] = M [kg], [0,1] = R [AU].
    # NOTE(review): M and R are module globals; the `planet` parameter is
    # unused here — confirm whether it should index into Planets instead.
    geometries[0,0] = M
    geometries[0,1] = R
    # Small bodies: uniform random radius, S-type density.
    for i in range(n_bodies):
        radius = np.random.randint(low=lowest_R, high=highest_R)
        volume = 4/3 * radius**3 * np.pi
        mass = volume * rho_S
        geometries[i+1,0] = mass
        # Convert the radius m -> km -> AU.
        geometries[i+1,1] = radius / 1000 / AU_to_km
    return geometries
# ## Generating coordinates of small bodies
def sign_choose():
    """Return -1 or +1 with equal probability."""
    return random.choice((-1, 1))
def gen_coord_and_vel(planet='Jupiter', n_bodies=1e02, obliquity=0.5):
    """Generate 2D starting coordinates and velocities for the central body
    and n_bodies asteroids on randomised elliptical orbits.

    Row 0 of both returned arrays is the central body, placed at the focal
    distance c = obliquity*R on the x-axis and at rest.  Rows 1..n_bodies
    are asteroids positioned on random ellipses with speeds from the
    vis-viva equation.

    Returns (coordinates, velocities), both shaped (n_bodies+1, 2),
    in AU and AU/year respectively.

    NOTE(review): the `planet` parameter is unused — the function reads the
    module globals R, M and G_AU instead; confirm intent.
    """
    # FIX: the default 1e02 is a float; np.zeros shapes and range() need
    # ints, so the original crashed when called with the default.
    n_bodies = int(n_bodies)
    coordinates = np.zeros((n_bodies+1, 2))
    velocities = np.zeros((n_bodies+1, 2))
    dr = 1.5        # spread of perigee distances (in units of R)
    r = 20          # mean perigee distance (in units of R)
    max_e = 0.4     # maximum orbital eccentricity
    # Focal distance of trajectories == the central body's x coordinate [AU].
    c = obliquity * R
    coordinates[0,:] = np.array((c,0))
    velocities[0,:] = np.array((0,0))
    for i in range(n_bodies):
        # Perigee r_p = a*(1 - e^2)  ->  semi-major axis a = r_p/(1 - e^2) [AU]
        r_p = R * r + R * np.random.random() * dr * sign_choose()
        e = np.random.random() * max_e
        a = r_p / (1 - e**2)
        # Semi-minor axis b = r_p / sqrt(1 - e^2) [AU]
        b = r_p / np.sqrt(1 - e**2)
        # Random position angle on the ellipse, phi in [0, 2*pi).
        phi = (np.random.randint(360) + np.random.random()) / 180 * np.pi
        x_coord = a * np.cos(phi)
        y_coord = b * np.sin(phi)
        # Distance from the central body [AU].
        d = np.sqrt((x_coord-c)**2 + y_coord**2)
        # Vis-viva speed |v| = sqrt(G*M*(2/d - 1/a)) [AU/year].
        v = np.sqrt(G_AU * M * (2/d - 1/a))
        # Unit tangent of the ellipse at phi gives the velocity direction.
        grad_r_x = -a * np.sin(phi)
        grad_r_y = b * np.cos(phi)
        v_e_x = grad_r_x / np.sqrt(grad_r_x**2 + grad_r_y**2)
        v_e_y = grad_r_y / np.sqrt(grad_r_x**2 + grad_r_y**2)
        v_x = v_e_x * v
        v_y = v_e_y * v
        # Store state: [r] = AU, [v] = AU/year.
        coordinates[i+1,0] = x_coord
        coordinates[i+1,1] = y_coord
        velocities[i+1,0] = v_x
        velocities[i+1,1] = v_y
    return coordinates, velocities
# Generate initial geometry and state; obliquity=1 places the planet at x=R.
geometries = gen_geom(planet=planet, n_bodies=n_bodies)
coordinates, velocities = gen_coord_and_vel(planet=planet, n_bodies=n_bodies, obliquity=1)
# ## RK4
def kinetic_energy(V, i):
    """Kinetic energy E = 1/2 * m_i * |V|^2 of body i.

    Mass m_i is read from the module-level `geometries` array.
    """
    # BUG FIX: the original computed 1/2*m*|V| (norm not squared), which is
    # not a kinetic energy.
    return 1/2 * geometries[i,0] * np.linalg.norm(V)**2
@jit(nopython=True)
def sum_j(X, i):
    # Gravitational acceleration term for body i at position X:
    #   a_i = -G * sum_j { m_j * (r_i - r_j) / |r_i - r_j|^3 }
    # This function returns the sum only; the caller applies the -G factor.
    # NOTE(review): reads module globals `new_coordinates`, `geometries`
    # and `R` — numba captures globals at compile time, so in-place
    # mutation is visible but rebinding these names is not; confirm.
    summ = np.zeros(len(X))
    for j in range(0, len(new_coordinates)):
        if i != j:
            # Calculate the d = |r_i - r_j| part
            r_dist = np.linalg.norm(X - new_coordinates[j,:])
            # Skip overlapping bodies; only interact within 2R of body i,
            # except the central body (j == 0), which always attracts.
            if (r_dist > geometries[i,1] + geometries[j,1]) and (r_dist < 2*R or j == 0):
                # Calculate the d^3 part
                r_cubed = r_dist*r_dist*r_dist
                summ += geometries[j,0] * (X - new_coordinates[j,:]) / r_cubed
    return summ
@jit(nopython=True)
def derivates(X, V, i):
    # Right-hand side of the equations of motion for body i:
    #   d(r)/dt = v
    #   d(v)/dt = a_i = -G * sum_j { m_j * (r_i - r_j) / |r_i - r_j|^3 }
    # Returns (dXdt, dVdt) for use inside the RK4 stages.
    assert X.size == V.size
    # Create placeholder array to store output of the RK4 step
    # (both are immediately rebound below; kept from the original).
    dXdt = np.zeros(len(X))
    dVdt = np.zeros(len(V))
    dXdt = V
    dVdt = - G_AU * sum_j(X, i)
    return dXdt, dVdt
@jit(nopython=True)
def RK4_step(new_coordinates, new_velocities, i, dt):
    # One classical 4th-order Runge-Kutta step of size dt for body i.
    # Returns (dX, dV, E_i): the position/velocity increments and the
    # body's pre-step energy term.
    X = new_coordinates[i]
    V = new_velocities[i]
    # NOTE(review): this is 1/2*m*|V| — the norm is not squared, so E_i is
    # not a true kinetic energy; confirm whether |V|**2 was intended.
    E_i = 1/2 * geometries[i,0] * np.linalg.norm(V)
    k1X, k1V = derivates(X, V, i)
    k1X = k1X * dt
    k1V = k1V * dt
    k2X, k2V = derivates(X + 0.5 * k1X, V + 0.5 * k1V, i)
    k2X = k2X * dt
    k2V = k2V * dt
    k3X, k3V = derivates(X + 0.5 * k2X, V + 0.5 * k2V, i)
    k3X = k3X * dt
    k3V = k3V * dt
    k4X, k4V = derivates(X + k3X, V + k3V, i)
    k4X = k4X * dt
    k4V = k4V * dt
    # Weighted RK4 combination of the four stage increments.
    dX = (k1X + 2 * k2X + 2 * k3X + k4X) / 6.0
    dV = (k1V + 2 * k2V + 2 * k3V + k4V) / 6.0
    return dX, dV, E_i
def step_t(new_coordinates, new_velocities, dt):
    """Advance every body by one RK4 step of size dt (in place).

    Returns the (mutated) coordinate and velocity arrays plus the
    per-body pre-step energy terms dE.

    NOTE(review): bodies are updated sequentially, so later bodies see
    already-advanced positions of earlier ones within the same step.
    """
    # FIX: size the energy array from the actual state instead of the
    # module-level n_bodies global, so the function works for any count.
    dE = np.zeros(len(new_coordinates))
    for i in range(len(new_coordinates)):
        dX, dV, E_i = RK4_step(new_coordinates, new_velocities, i=i, dt=dt)
        new_coordinates[i] += dX
        new_velocities[i] += dV
        dE[i] = E_i
    return new_coordinates, new_velocities, dE
def run_simulation(new_coordinates, new_velocities, max_t=1, dt=5e-3):
    """Integrate the system from t=0 to max_t [years] with step dt [years].

    Mutates the state arrays in place via step_t and returns them together
    with the energy history E, shaped (n_steps, n_bodies+1).
    """
    # Array to store kinetic energies
    E = []
    t = 0
    # NOTE(review): accumulating t += dt in floating point drifts, so the
    # loop may take one step more or fewer than max_t/dt for some dt.
    while t <= max_t:
        new_coordinates, new_velocities, dE = step_t(new_coordinates, new_velocities, dt=dt)
        E.append(dE)
        # Progress report roughly every 20 steps and on the final step.
        if (t//dt % 20 == 0) or (int(t/dt+1)==int(max_t/dt)):
            clear_output(wait=True)
            print('\rt : {0:.0f}/{1:.0f}'.format(t/dt+1, max_t/dt))
        t += dt
    return new_coordinates, new_velocities, np.array(E)
# --- Run the simulation and persist/restore its state ---------------------
# Work on copies so the initial conditions stay available for plotting.
new_coordinates = coordinates.copy()
new_velocities = velocities.copy()
# max_t : [Years]
max_t = 100
# dt : [Years]
dt = 1e-4
start = datetime.now()
new_coordinates,\
new_velocities, \
E = run_simulation(new_coordinates, new_velocities,
                   max_t=max_t, dt=dt)
end = datetime.now()
#np.save('coords', new_coordinates)
#np.save('vels', new_velocities)
print('Total time was needed :', end-start)
# Save everything keyed by the run parameters, then reload (the reload is
# a no-op here but lets later cells run without re-simulating).
np.save(file=out + 'geometries_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=geometries)
np.save(file=out + 'coordinates_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=coordinates)
np.save(file=out + 'velocities_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=velocities)
np.save(file=out + 'new_coordinates_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=new_coordinates)
np.save(file=out + 'new_velocities_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=new_velocities)
np.save(file=out + 'E_max_{0}_y_dt_{1}_y'.format(max_t, dt), arr=E)
geometries = np.load(file=out + 'geometries_max_{0}_y_dt_{1}_y.npy'.format(max_t, dt))
coordinates = np.load(file=out + 'coordinates_max_{0}_y_dt_{1}_y.npy'.format(max_t, dt))
velocities = np.load(file=out + 'velocities_max_{0}_y_dt_{1}_y.npy'.format(max_t, dt))
new_coordinates = np.load(file=out + 'new_coordinates_max_{0}_y_dt_{1}_y.npy'.format(max_t, dt))
new_velocities = np.load(file=out + 'new_velocities_max_{0}_y_dt_{1}_y.npy'.format(max_t, dt))
# +
# Plot the energy series of body 1 and mark its local extrema.
fig, axes = plt.subplots(figsize=(13,8))
axislabelsize = 25
axisticksize = 20
offsettextsize = 20
# Extrema masks via neighbour comparisons on E[:,1] (body index 1);
# `a` and `b` mark the two families of turning points.
a = np.r_[True, E[:,1][1:] < E[:,1][:-1]] & np.r_[E[:,1][:-1] < E[:,1][1:], True]
b = np.r_[True, E[:,1][1:] > E[:,1][:-1]] & np.r_[E[:,1][:-1] > E[:,1][1:], True]
max_indeces = np.where(a == True)[0]
min_indeces = np.where(b == True)[0]
# NOTE(review): `idx in max_indeces` scans the array each iteration —
# E[:,1][max_indeces] would give the same values directly.
max_values = []
for idx, x in enumerate(E[:,1]):
    if idx in max_indeces:
        max_values.append(x)
min_values = []
for idx, x in enumerate(E[:,1]):
    if idx in min_indeces:
        min_values.append(x)
axes.plot(E[:,1], color='grey', ls='-', alpha=0.3)
axes.scatter(max_indeces, max_values,
             color='tab:red', marker='x', lw=2)
axes.scatter(min_indeces, min_values,
             color='tab:orange', marker='x', lw=2)
axes.set_xlabel('Number of time-steps\nHere $dt = {0}$ years'.format(dt), fontweight='bold', fontsize=axislabelsize)
axes.set_ylabel('Kinetic energy of an\narbitrary asteroid [J]', fontweight='bold', fontsize=axislabelsize)
axes.tick_params(axis='both', which='major', labelsize=axisticksize)
axes.yaxis.offsetText.set_fontsize(offsettextsize)
plt.savefig(figsave_dir + 'kin_E_max_{0}_y_dt_{1}_y.pdf'.format(max_t, dt),
            format=figsave_format,
            dpi=figsave_dpi,
            bbox_inches='tight')
plt.show()
# -
# ## Animation
# Point matplotlib's animation writer at the local (Windows) FFmpeg binary.
plt.rcParams['animation.ffmpeg_path'] = 'C://Program Files//FFmpeg//bin//ffmpeg.exe'
plt.rcParams
# +
# Create new Figure and an Axes which fills it.
# Animation of the running simulation: asteroid scatter plus a red velocity
# vector per body, advanced one step_t per frame and written to MP4.
fig, axes = plt.subplots(figsize=(40, 40), facecolor='black')
axes.set_facecolor('black')
axes.axis('off')
scale = 2.5
axes.set_xlim(-np.max(coordinates[:,0])*scale, np.max(coordinates[:,0])*scale)
axes.set_ylim(-np.max(coordinates[:,1])*scale, np.max(coordinates[:,1])*scale)
axes.set_xticks([])
axes.set_yticks([])
# Velocity vectors are drawn scaled down by 100/y_to_h for visibility.
vel_scale = 100/y_to_h
line = [axes.plot([new_coordinates[idx,0], new_coordinates[idx,0] + vel_scale*v[0]],
                  [new_coordinates[idx,1], new_coordinates[idx,1] + vel_scale*v[1]],
                  color='tab:red') for idx, v in enumerate(new_velocities[1:])]
scat = axes.scatter(coordinates[1:,0], coordinates[1:,1],
                    color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
central_planet = Circle(xy=(R,0), radius=geometries[0,1],
                        color=Planet_Colors[planet], ec=Planet_Colors[planet])
axes.add_patch(central_planet)
def animate(i):
    # One frame: advance the global state by anim_dt and redraw markers
    # and velocity vectors (central body excluded via [1:]).
    X, V, _ = step_t(new_coordinates, new_velocities, dt=anim_dt)
    X = X[1:]
    V = V[1:]
    scat.set_offsets(X)
    # NOTE(review): the loop variable `i` shadows the frame-index
    # parameter `i`.
    for idx, i in enumerate(line):
        i[0].set_data([X[idx,0], X[idx,0] + vel_scale*V[idx,0]],
                      [X[idx,1], X[idx,1] + vel_scale*V[idx,1]])
anim_years = 1/12
anim_dt = 5e-5
ani = FuncAnimation(fig, animate, frames=int(anim_years / anim_dt), interval=67, repeat=True)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, bitrate=28000,
                metadata=dict(artist='<NAME>'),)
ani.save('velocity_anim_{0}_{1:.4f}_y_{2}_dt.mp4'.format(n_bodies, anim_years, anim_dt), dpi=50, writer=writer,
         savefig_kwargs={'facecolor':'black'})
plt.show()
# -
# ## Plot initial conditions
# ### Coordinates
# +
# Scatter plot of the initial asteroid positions around the central body.
fig, axes = plt.subplots(figsize=(20, 20), facecolor='black')
axes.set_facecolor('black')
axes.axis('off')
# burlywood
# silver
axes.scatter(coordinates[1:,0], coordinates[1:,1],
             color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
central_planet = Circle(xy=(R,0), radius=geometries[0,1],
                        color=Planet_Colors[planet], ec=Planet_Colors[planet])
axes.add_patch(central_planet)
#Planet_Pch = Circle(xy=(c,0), radius=R,
#                    ec=Planet_Colors[planet], fc=Planet_Colors[planet])
#axes.add_patch(Planet_Pch)
scale = 1.3
axes.set_xlim(-np.max(coordinates[:,0])*scale, np.max(coordinates[:,0])*scale)
axes.set_ylim(-np.max(coordinates[:,1])*scale, np.max(coordinates[:,1])*scale)
axes.set_xticks([])
axes.set_yticks([])
save_mpl_fig(fig,
             fname=figsave_dir + 'coordinate_start_n_{0}_max_{1}_y_dt_{2}_y'.format(n_bodies, max_t, dt),
             save_format='png', save_dpi=150)
plt.show()
# -
# NOTE(review): leftover scratch expression; its value is only shown in the
# notebook output and is otherwise discarded.
1e5/(R * AU_to_km)
# ### Velocities
# +
# Initial positions plus scaled velocity vectors (red) for every body.
fig, axes = plt.subplots(figsize=(30, 30), facecolor='black')
axes.set_facecolor('black')
axes.axis('off')
# burlywood
# silver
vel_scale = 100/y_to_h
for idx, v in enumerate(velocities):
    axes.plot([coordinates[idx,0], coordinates[idx,0]+vel_scale*v[0]],
              [coordinates[idx,1], coordinates[idx,1]+vel_scale*v[1]],
              color='tab:red')
axes.scatter(coordinates[1:,0], coordinates[1:,1],
             color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
central_planet = Circle(xy=(R,0), radius=geometries[0,1],
                        color=Planet_Colors[planet], ec=Planet_Colors[planet])
axes.add_patch(central_planet)
scale = 2.5
axes.set_xlim(-np.max(coordinates[:,0])*scale, np.max(coordinates[:,0])*scale)
axes.set_ylim(-np.max(coordinates[:,1])*scale, np.max(coordinates[:,1])*scale)
axes.set_xticks([])
axes.set_yticks([])
plt.show()
# -
# ### 3D velocity plot
# +
# 3D view of the initial state: textured-less sphere for the planet plus
# the asteroid scatter and their velocity vectors projected into z=0.
fig, ax = plt.subplots(figsize=(20,20), dpi=300,
                       facecolor='black', subplot_kw={'projection' : '3d'})
#ax.set_aspect('equal')
azimuth = 120
elevation = 30
ax.view_init(elevation, azimuth)
ax.set_facecolor('black')
ax.axis('off')
# Symmetric axis limits sized from R and the scatter scale.
k = 5e-2 * R * np.cbrt(s_scale)
ax.set_xlim3d(-k, k)
ax.set_ylim3d(-k, k)
ax.set_zlim3d(-k, k)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# Plot planet
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = R * np.outer(np.cos(u), np.sin(v))
y = R * np.outer(np.sin(u), np.sin(v))
z = R * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x-coordinates[0,0], y, z, rstride=4, cstride=4,
                color=Planet_Colors[planet], linewidth=0, alpha=0.5)
vel_scale = 100/y_to_h
for idx, v in enumerate(velocities):
    ax.plot([coordinates[idx,0], coordinates[idx,0]+vel_scale*v[0]],
            [coordinates[idx,1], coordinates[idx,1]+vel_scale*v[1]],
            color='tab:red')
ax.scatter(coordinates[1:,0], coordinates[1:,1],
           edgecolor=None, color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
plt.savefig(figsave_dir + 'velocity_start3D_max_{0}_y_dt_{1}_y.pdf'.format(max_t, dt),
            format=figsave_format,
            dpi=figsave_dpi,
            facecolor='black',
            bbox_inches='tight')
plt.show()
# -
# ## Aftermath plots
# ### Coordinates
# +
# Final (red) vs initial (burlywood) asteroid positions; optional green
# displacement lines when `dil` is enabled.
fig, axes = plt.subplots(figsize=(20, 20), facecolor='black')
axes.set_facecolor('black')
axes.axis('off')
# burlywood
# silver
axes.scatter(new_coordinates[1:,0], new_coordinates[1:,1],
             color='red', s=geometries[1:,1]*s_scale, alpha=0.7)
dil = False
if dil:
    for idx, x in enumerate(new_coordinates):
        axes.plot([coordinates[idx,0], x[0]],
                  [coordinates[idx,1], x[1]],
                  color='tab:green', lw=1.5)
axes.scatter(coordinates[1:,0], coordinates[1:,1],
             color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
central_planet = Circle(xy=(R,0), radius=geometries[0,1],
                        color=Planet_Colors[planet], ec=Planet_Colors[planet])
axes.add_patch(central_planet)
scale = 1.3
axes.set_xlim(-np.max(coordinates[:,0])*scale, np.max(coordinates[:,0])*scale)
axes.set_ylim(-np.max(coordinates[:,1])*scale, np.max(coordinates[:,1])*scale)
axes.set_xticks([])
axes.set_yticks([])
plt.savefig(figsave_dir + 'scatter_final_max_{0}_y_dt_{1}_y.pdf'.format(max_t, dt),
            format=figsave_format,
            dpi=figsave_dpi,
            facecolor='black',
            bbox_inches='tight')
plt.show()
# -
# ### Velocities
# +
# Final (red) vs initial (green) velocity vectors over both position
# scatters.
fig, axes = plt.subplots(figsize=(30, 30), facecolor='black')
axes.set_facecolor('black')
axes.axis('off')
# burlywood
# silver
vel_scale = 100/y_to_h
for idx, v in enumerate(new_velocities):
    axes.plot([new_coordinates[idx,0], new_coordinates[idx,0] + vel_scale*v[0]],
              [new_coordinates[idx,1], new_coordinates[idx,1] + vel_scale*v[1]],
              color='tab:red')
for idx, v in enumerate(velocities):
    axes.plot([coordinates[idx,0], coordinates[idx,0] + vel_scale*v[0]],
              [coordinates[idx,1], coordinates[idx,1] + vel_scale*v[1]],
              color='tab:green')
axes.scatter(coordinates[1:,0], coordinates[1:,1],
             color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
axes.scatter(new_coordinates[1:,0], new_coordinates[1:,1],
             color='red', s=geometries[1:,1]*s_scale, alpha=0.7)
central_planet = Circle(xy=(R,0), radius=geometries[0,1],
                        color=Planet_Colors[planet], ec=Planet_Colors[planet])
axes.add_patch(central_planet)
scale = 2.5
axes.set_xlim(-np.max(coordinates[:,0])*scale, np.max(coordinates[:,0])*scale)
axes.set_ylim(-np.max(coordinates[:,1])*scale, np.max(coordinates[:,1])*scale)
axes.set_xticks([])
axes.set_yticks([])
# FIX: the original called plt.savefig twice with identical arguments,
# writing the same file twice; the duplicate call was removed.
plt.savefig(figsave_dir + 'velocity_final_max_{0}_y_dt_{1}_y.pdf'.format(max_t, dt),
            format=figsave_format,
            dpi=figsave_dpi,
            facecolor='black',
            bbox_inches='tight')
plt.show()
# -
# ### 3D velocity plot
# +
# 3D view of the final state: planet sphere, final (red) and initial
# (green) velocity vectors, and both position scatters.
fig, ax = plt.subplots(figsize=(20,20), dpi=300, facecolor='black', subplot_kw={'projection' : '3d'})
#ax.set_aspect('equal')
azimuth = 120
elevation = 30
ax.view_init(elevation, azimuth)
ax.set_facecolor('black')
ax.axis('off')
# Symmetric axis limits sized from R and the scatter scale.
k = 5e-2 * R * np.cbrt(s_scale)
ax.set_xlim3d(-k, k)
ax.set_ylim3d(-k, k)
ax.set_zlim3d(-k, k)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# Plot planet
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = R * np.outer(np.cos(u), np.sin(v))
y = R * np.outer(np.sin(u), np.sin(v))
z = R * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x-new_coordinates[0,0], y, z, rstride=4, cstride=4,
                color=Planet_Colors[planet], linewidth=0, alpha=0.5)
vel_scale = 100/y_to_h
for idx, v in enumerate(new_velocities):
    ax.plot([new_coordinates[idx,0], new_coordinates[idx,0] + vel_scale*v[0]],
            [new_coordinates[idx,1], new_coordinates[idx,1] + vel_scale*v[1]],
            color='tab:red')
for idx, v in enumerate(velocities):
    ax.plot([coordinates[idx,0], coordinates[idx,0] + vel_scale*v[0]],
            [coordinates[idx,1], coordinates[idx,1] + vel_scale*v[1]],
            color='tab:green')
ax.scatter(coordinates[1:,0], coordinates[1:,1],
           color='burlywood', s=geometries[1:,1]*s_scale, alpha=0.7)
ax.scatter(new_coordinates[1:,0], new_coordinates[1:,1],
           color='red', s=geometries[1:,1]*s_scale, alpha=0.7)
plt.savefig(figsave_dir + 'velocity_final3D_max_{0}_y_dt_{1}_y.pdf'.format(max_t, dt),
            format=figsave_format,
            dpi=figsave_dpi,
            facecolor='black',
            bbox_inches='tight')
plt.show()
# -
# ## Mayavi high-quality models
#
# #### Sources:
# - https://docs.enthought.com/mayavi/mayavi/auto/example_tvtk_in_mayavi.html
# - https://stackoverflow.com/questions/53074908/map-an-image-onto-a-sphere-and-plot-3d-trajectories
# - https://www.solarsystemscope.com/textures/
os.listdir('./img_src/')
# +
# High-quality 3D model: a texture-mapped planet plus the small-body positions.
# Figure aspect follows the ratio b/a.
fig = mlab.figure(size=(800,b/a*800))
# Equirectangular surface texture named after the planet (see img_src listing).
image_file = './img_src/{0}.jpg'.format(planet.lower())
# load and map the texture
img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
# (interpolate for a less raster appearance when zoomed in)
# use a TexturedSphereSource, a.k.a. getting our hands dirty
Nrad = 180  # angular resolution of the sphere mesh
# create the sphere source with a given radius and angular resolution
sphere = tvtk.TexturedSphereSource(radius=R, theta_resolution=Nrad,
                                   phi_resolution=Nrad)
# assemble rest of the pipeline, assign texture
sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port)
sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture)
# Small bodies in the z=0 plane: initial positions (copper) and final (Reds),
# sized by the second geometry column.
asts1 = mlab.points3d(coordinates[1:,0], coordinates[1:,1], np.zeros((n_bodies)), geometries[1:,1],
                      colormap='copper', scale_factor=500)
asts2 = mlab.points3d(new_coordinates[1:,0], new_coordinates[1:,1], np.zeros((n_bodies)), geometries[1:,1],
                      colormap='Reds', scale_factor=500)
fig.scene.add_actor(sphere_actor)
display(fig)
# -
| Project 1/notebooks/project 1..ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:fastai]
# language: python
# name: python3
# ---
# ## References
#
# Adversarial probabilities from https://www.kaggle.com/joatom/a-test-like-validation-set
# Other References are inline.
# +
# #!conda install -c conda-forge pandas-profiling -y
# #!pip install eli5
# -
# %matplotlib inline
# +
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold, StratifiedKFold, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import roc_curve, auc, confusion_matrix, accuracy_score, precision_recall_curve
import eli5
from eli5.sklearn import PermutationImportance
import warnings
warnings.filterwarnings('ignore')
from pandas_profiling import ProfileReport
# -
# Load the competition inputs: site-2 reveal list, subject loadings, labels.
revs2=pd.read_csv('/kaggle/input/trends-assessment-prediction/reveal_ID_site2.csv')
data = pd.read_csv('/kaggle/input/trends-assessment-prediction/loading.csv')
y_data = pd.read_csv('/kaggle/input/trends-assessment-prediction/train_scores.csv')
# +
# Feature rows known to come from site 2.
s2data = data.merge(revs2)
# Feature rows that have training labels.
s1data = data.merge(y_data['Id'])
# NOTE(review): rows WITH training scores are flagged is_test=1 here, and
# unlabeled rows get 0 after the left merge -- the naming is inverted
# relative to the usual convention; confirm intended.
y_data['is_test'] = 1
train_test = data.merge(y_data[['Id','is_test']], how ='left').fillna(0)
#train_test_ids=train_test.pop('Id')
# +
#s2data.describe()
# +
#s1data.describe()
# -
# # Helpers
# setup KFold
# 3 stratified folds, shuffled with a fixed seed for reproducibility.
splits = 3
#repeats = 2
rskf = StratifiedKFold(n_splits=splits, random_state=2020, shuffle=True)
# https://www.kaggle.com/ynouri/random-forest-k-fold-cross-validation
def compute_roc_auc(X, y, index, clf):
    """Score a fitted classifier on the rows of X/y selected by *index*.

    Returns a tuple of (positive-class probabilities, area under the ROC
    curve, area under the precision-recall curve).
    """
    y_predict = clf.predict_proba(X.iloc[index])[:, 1]
    # FIX: removed a leftover debug print of the full prediction vector.
    fpr, tpr, thresholds = roc_curve(y.iloc[index], y_predict)
    auc_score_roc = auc(fpr, tpr)
    # http://www.davidsbatista.net/blog/2018/08/19/NLP_Metrics/
    precision, recall, thresholds = precision_recall_curve(y.iloc[index], y_predict)
    auc_score_prc = auc(recall, precision)
    return y_predict, auc_score_roc, auc_score_prc
def preds(all_data, av_y, oof_df = None):
    """Out-of-fold adversarial-validation probabilities via random forest.

    Fits a RandomForestClassifier on each CV split of *all_data* (all columns
    except 'Id') against the binary target *av_y*, collecting out-of-fold
    probability predictions.  If *oof_df* is a DataFrame, its 'av_prob'
    column is filled with the probability averaged over the fold models.

    Returns (out_of_fold_predictions, oof_df).
    """
    features = list(set(all_data.columns) - set(['Id']))
    # http://fastml.com/adversarial-validation-part-one/
    clf = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=2020)
    fprs, tprs, scores_roc_train, scores_roc_valid, scores_prc_train, scores_prc_valid = [], [], [], [], [], []
    # https://github.com/zygmuntz/adversarial-validation/blob/master/numerai/sort_train.py
    predictions = np.zeros(av_y.shape[0])
    if isinstance(oof_df, pd.DataFrame):
        oof_df['av_prob'] = 0
    for (i_train, i_valid), i in zip(rskf.split(all_data[features], av_y), range(splits)):
        print('Split', i)
        clf.fit(all_data[features].iloc[i_train], av_y.iloc[i_train])
        # Score on both folds so over-fitting is visible.
        _, auc_score_roc_train, auc_score_prc_train = compute_roc_auc(all_data[features], av_y, i_train, clf)
        y_predict, auc_score_roc, auc_score_prc = compute_roc_auc(all_data[features], av_y, i_valid, clf)
        predictions[i_valid] = y_predict
        # Out-of-fold: accumulate the fold-averaged probability.
        if isinstance(oof_df, pd.DataFrame):
            oof_df['av_prob'] += clf.predict_proba(oof_df.iloc[oof_df.index][features])[:, 1] / splits
        scores_roc_train.append(auc_score_roc_train)
        scores_roc_valid.append(auc_score_roc)
        scores_prc_train.append(auc_score_prc_train)
        scores_prc_valid.append(auc_score_prc)
    # Feature Importance -- NOTE: taken from the model of the LAST fold only.
    ## https://towardsdatascience.com/running-random-forests-inspect-the-feature-importances-with-this-code-2b00dd72b92e
    clf.score(all_data[features].iloc[i_valid], av_y.iloc[i_valid])
    rf_feature_importances = pd.DataFrame(clf.feature_importances_,
                                          index=features,
                                          columns=['importance']).sort_values('importance', ascending=False)
    display(rf_feature_importances.head(10))
    # Permutation Importance
    #permImp = PermutationImportance(clf, random_state=2021).fit(all_data.iloc[i_valid], av_y.iloc[i_valid])
    #display(eli5.show_weights(permImp, feature_names = all_data.columns.tolist()))
    print('Mean Accuracy roc:', np.mean(scores_roc_valid))
    # BUGFIX: the PRC line previously re-printed the ROC scores
    # (np.mean(scores_roc_valid)); report the collected PRC scores instead.
    print('Mean Accuracy precision recal:', np.mean(scores_prc_valid))
    return predictions, oof_df
# # AV for train test
# +
# Adversarial validation: can a model tell labeled rows from the rest?
av_y_tt = train_test.pop('is_test')
# +
# Baseline CV AUC with logistic regression.
scores = cross_val_score(LogisticRegression(random_state=2020, solver='lbfgs',max_iter=1000), train_test, av_y_tt, cv=rskf, scoring='roc_auc') #'f1'
print("Log Regression Accuracy (RoC): %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), 'AV LogReg'))
# Baseline CV AUC with a random forest.
scores = cross_val_score(RandomForestClassifier(n_estimators = 100, n_jobs = -1, random_state=2020), train_test, av_y_tt, cv=rskf, scoring='roc_auc') #'f1'
print("Random Forrest Accuracy (RoC): %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), 'AV RandomForestClas'))
# -
# Out-of-fold probability that each row belongs to the flagged class.
train_test['is_test_prob'], _ = preds(train_test, av_y_tt)
# Temporarily restore the label to compare probability distributions per class.
train_test['is_test']=av_y_tt
train_test.groupby(['is_test']).describe()[['is_test_prob']]
train_test = train_test.drop('is_test',axis=1)
# # AV for site2
# +
# Same adversarial setup, now distinguishing site-2 rows from labeled rows.
s2data['is_site2']=1
s1data['is_site2']=0
train_s2test = s2data.append(s1data)
#train_s2test_ids = train_s2test.pop('Id')
av_y = train_s2test.pop('is_site2')
# +
# Baseline CV AUC with logistic regression.
scores = cross_val_score(LogisticRegression(random_state=2020, solver='lbfgs',max_iter=1000), train_s2test, av_y, cv=rskf, scoring='roc_auc') #'f1'
print("Log Regression Accuracy (RoC): %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), 'AV LogReg'))
# Baseline CV AUC with a random forest.
scores = cross_val_score(RandomForestClassifier(n_estimators = 100, n_jobs = -1, random_state=2020), train_s2test, av_y, cv=rskf, scoring='roc_auc') #'f1'
print("Random Forrest Accuracy (RoC): %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), 'AV RandomForestClas'))
# -
# Out-of-fold site-2 probabilities; also fills train_test['av_prob'].
train_s2test['p'], train_test = preds(train_s2test, av_y, train_test)
train_s2test['is_s2']=av_y
train_s2test.groupby(['is_s2']).describe()[['p']]
# NOTE(review): this sets an attribute on the column Index object and does
# NOT rename any column -- the code below still reads 'av_prob', so a real
# rename (DataFrame.rename) was probably intended. Confirm.
train_test.columns.av_prob = 'site2_prob'
# +
# override with validation data to not overfit to known train_tests2 data
# -
train_test=train_test.merge(train_s2test[['Id', 'p']], how='left')
train_test['is_site2_prob'] = train_test['p'].combine_first(train_test['av_prob'])
train_test[['Id','is_test_prob','is_site2_prob']].to_csv('test_s2_probs.csv', index=False)
| notes/trends-av-probs-of-test-and-s2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sqlite3
# SQLite database file, expected in the current working directory.
DB_FILEPATH = "rpg_db.sqlite3"
connection = sqlite3.connect(DB_FILEPATH)
print("CONNECTION:", connection)
# Cursor shared by all of the queries below.
cursor = connection.cursor()
print(cursor)
# +
# 1) How many total Characters are there?
query_1='''
SELECT
COUNT(character_id),
COUNT(DISTINCT character_id)
FROM charactercreator_character;
'''
result=cursor.execute(query_1).fetchall()
print(result)
# -
# type(result)
#
# 2) How many of each specific subclass?
#
# There are 6 subclasses under Characters
# 3) How many total Items?
query_3='''
SELECT
COUNT(item_id),
COUNT(DISTINCT item_id)
FROM armory_item;
'''
result=cursor.execute(query_3).fetchall()
print(result)
# 4) How many of the Items are weapons?
query_4='''
SELECT
COUNT(armory_weapon.power)
FROM armory_item
INNER JOIN armory_weapon
ON armory_item.item_id=armory_weapon.item_ptr_id;
'''
result=cursor.execute(query_4).fetchall()
print(result)
# not weapon
query_4='''
SELECT
armory_item.item_id,
armory_item.name,
armory_weapon.item_ptr_id,
armory_weapon.power
FROM armory_item
LEFT JOIN armory_weapon
ON armory_item.item_id=armory_weapon.item_ptr_id;
'''
result=cursor.execute(query_4).fetchall()
print(result)
# +
import pandas as pd
#get columns from cursor object
columns=list(map(lambda x:x[0],cursor.description))
# -
#assign to DataFrame
df=pd.DataFrame(data=result,columns=columns)
print(df.shape)
df.head()
# Extra thing: Create a function
def get_data(query, conn):
    """Run *query* on the given sqlite3 connection and return a DataFrame.

    Column names are taken from the cursor description, so they match the
    SELECT clause of the query.
    """
    # BUGFIX: previously called connection.cursur() -- a typo -- on the
    # global `connection` instead of using the `conn` parameter.
    cursor = conn.cursor()
    result = cursor.execute(query).fetchall()
    # Column names from the cursor metadata (first element of each entry).
    columns = list(map(lambda x: x[0], cursor.description))
    # Assemble the rows into a DataFrame.
    df = pd.DataFrame(data=result, columns=columns)
    return df
# 5)How many Items does each character have? (Return first 20 rows)
query_5='''
SELECT
character_id,
COUNT(DISTINCT item_id)
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;
'''
result=cursor.execute(query_5).fetchall()
print(result)
# +
# 6) How many Weapons does each character have? (Return first 20 rows)
# FIX: COUNT(DISTINCT armory_weapon.power) counted distinct *power values*,
# which undercounts when two of a character's weapons share the same power.
# Count the weapon rows themselves (item_ptr_id) instead.
query_6='''
SELECT
    charactercreator_character_inventory.character_id,
    armory_item.item_id,
    armory_item.name,
    armory_weapon.item_ptr_id,
    COUNT(armory_weapon.item_ptr_id)
FROM charactercreator_character_inventory
INNER JOIN armory_item ON charactercreator_character_inventory.item_id=armory_item.item_id
INNER JOIN armory_weapon ON armory_item.item_id=armory_weapon.item_ptr_id
GROUP BY character_id
LIMIT 20;
'''
result=cursor.execute(query_6).fetchall()
print(result)
# -
# 7)On average, how many Items does each Character have?
query_7='''
SELECT
character_id,
COUNT(DISTINCT item_id)
FROM charactercreator_character_inventory
GROUP BY character_id;
'''
result=cursor.execute(query_7).fetchall()
print(result)
| module1-introduction-to-sql/SQL-class1_Assign.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.019, "end_time": "2021-10-08T18:01:43.543804", "exception": false, "start_time": "2021-10-08T18:01:43.524804", "status": "completed"} tags=[]
# ## Library Imports
# + papermill={"duration": 0.121678, "end_time": "2021-10-08T18:01:43.685717", "exception": false, "start_time": "2021-10-08T18:01:43.564039", "status": "completed"} tags=[]
from time import time
notebook_start_time = time()
# + papermill={"duration": 6.1496, "end_time": "2021-10-08T18:01:49.853388", "exception": false, "start_time": "2021-10-08T18:01:43.703788", "status": "completed"} tags=[]
import os
import re
import random as r
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as DL
from torch.nn.utils import weight_norm as WN
from torchvision import models, transforms
import imgaug
import random as r
from imgaug import augmenters
import warnings
warnings.filterwarnings("ignore")
# + [markdown] papermill={"duration": 0.01067, "end_time": "2021-10-08T18:01:49.875458", "exception": false, "start_time": "2021-10-08T18:01:49.864788", "status": "completed"} tags=[]
# ## Constants and Utilities
# + papermill={"duration": 0.021543, "end_time": "2021-10-08T18:01:49.907781", "exception": false, "start_time": "2021-10-08T18:01:49.886238", "status": "completed"} tags=[]
def breaker(num=50, char="*") -> None:
    """Print a horizontal separator of *num* copies of *char*, padded by blank lines."""
    line = char * num
    print(f"\n{line}\n")
def head(x, no_of_ele=5) -> None:
    """Print the first *no_of_ele* items of the sequence *x*."""
    prefix = x[:no_of_ele]
    print(prefix)
def get_augment(seed: int):
    """Build a seeded imgaug pipeline: SomeOf(None, ...) applies a random
    subset of the listed flip/affine augmenters."""
    # Seed the global imgaug RNG so the augmentation is reproducible.
    imgaug.seed(seed)
    augment = augmenters.SomeOf(None, [
        augmenters.HorizontalFlip(p=0.5),
        augmenters.VerticalFlip(p=0.5),
        augmenters.Affine(scale=(0.75, 1.25), translate_percent=(-0.1, 0.1), rotate=(-45, 45), seed=seed),
    ], seed=seed)
    return augment
def show(image: np.ndarray) -> None:
    """Display *image* on a 9x6 inch figure with axis decorations hidden."""
    _, ax = plt.subplots(figsize=(9, 6))
    ax.imshow(image)
    ax.axis("off")
    plt.show()
# + papermill={"duration": 0.064016, "end_time": "2021-10-08T18:01:49.982452", "exception": false, "start_time": "2021-10-08T18:01:49.918436", "status": "completed"} tags=[]
# To-tensor + per-channel normalization with the standard ImageNet statistics
# used by the pretrained torchvision backbones below.
TRANSFORM_PRE = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225]),
                                    ])
# Prefer the GPU when one is available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Base seed for generating the per-pass augmentation seeds.
SEED = 0
# + [markdown] papermill={"duration": 0.010605, "end_time": "2021-10-08T18:01:50.004009", "exception": false, "start_time": "2021-10-08T18:01:49.993404", "status": "completed"} tags=[]
# ## Dataset Template
# + papermill={"duration": 0.02054, "end_time": "2021-10-08T18:01:50.035413", "exception": false, "start_time": "2021-10-08T18:01:50.014873", "status": "completed"} tags=[]
class DS(Dataset):
    """Thin Dataset wrapper that applies a transform to each image on access."""

    def __init__(self, images=None, transform=None):
        self.images = images
        self.transform = transform

    def __len__(self):
        # Number of images along the leading axis.
        n_images, *_ = self.images.shape
        return n_images

    def __getitem__(self, idx):
        # Transform lazily, one sample at a time.
        sample = self.images[idx]
        return self.transform(sample)
# + [markdown] papermill={"duration": 0.010917, "end_time": "2021-10-08T18:01:50.057748", "exception": false, "start_time": "2021-10-08T18:01:50.046831", "status": "completed"} tags=[]
# ## Build DataLoader
# + papermill={"duration": 0.017431, "end_time": "2021-10-08T18:01:50.086280", "exception": false, "start_time": "2021-10-08T18:01:50.068849", "status": "completed"} tags=[]
def build_dataloader(images: np.ndarray, transform=None, batch_size: int = 64):
    """Wrap *images* in a DS dataset and return an unshuffled DataLoader.

    *batch_size* generalizes the previously hard-coded value; the default of
    64 preserves the original behavior.  Shuffling stays off so extracted
    features line up row-for-row with the input images.
    """
    data_setup = DS(images=images, transform=transform)
    data = DL(data_setup, batch_size=batch_size, shuffle=False)
    return data
# + [markdown] papermill={"duration": 0.010617, "end_time": "2021-10-08T18:01:50.107566", "exception": false, "start_time": "2021-10-08T18:01:50.096949", "status": "completed"} tags=[]
# ## Build Model
# + papermill={"duration": 0.025685, "end_time": "2021-10-08T18:01:50.144411", "exception": false, "start_time": "2021-10-08T18:01:50.118726", "status": "completed"} tags=[]
def build_model(model_name: str, pretrained=True):
    """Return a headless torchvision backbone ("resnet", "vgg" or "densenet").

    The classification head is stripped and replaced with pooling + flatten
    so the model emits a flat feature vector per image; when *pretrained*,
    the loaded weights are frozen.
    """
    class ImageModel(nn.Module):
        def __init__(self, model_name=None, pretrained=False):
            super(ImageModel, self).__init__()
            # ResNet-50: drop the final FC layer, append a flatten.
            if re.match(r"^resnet$", model_name, re.IGNORECASE):
                self.features = models.resnet50(pretrained=pretrained, progress=True)
                if pretrained:
                    self.freeze()
                self.features = nn.Sequential(*[*self.features.children()][:-1])
                self.features.add_module("Flatten", nn.Flatten())
            # VGG-16 (BN): drop the last two children, pool to 2x2, flatten.
            if re.match(r"^vgg$", model_name, re.IGNORECASE):
                self.features = models.vgg16_bn(pretrained=pretrained, progress=True)
                if pretrained:
                    self.freeze()
                self.features = nn.Sequential(*[*self.features.children()][:-2])
                self.features.add_module("Adaptive Average Pool", nn.AdaptiveAvgPool2d(output_size=(2, 2)))
                self.features.add_module("Flatten", nn.Flatten())
            # DenseNet-169: drop the classifier, pool to 1x1, flatten.
            if re.match(r"^densenet$", model_name, re.IGNORECASE):
                self.features = models.densenet169(pretrained=pretrained, progress=True)
                if pretrained:
                    self.freeze()
                self.features = nn.Sequential(*[*self.features.children()][:-1])
                self.features.add_module("Adaptive Average Pool", nn.AdaptiveAvgPool2d(output_size=(1, 1)))
                self.features.add_module("Flatten", nn.Flatten())

        def freeze(self):
            # Disable gradients on the pretrained weights.
            for params in self.parameters():
                params.requires_grad = False

        def forward(self, x):
            return self.features(x)

    # breaker()
    # print("Building Model ...")
    model = ImageModel(model_name=model_name, pretrained=pretrained)
    return model
# + [markdown] papermill={"duration": 0.010553, "end_time": "2021-10-08T18:01:50.165728", "exception": false, "start_time": "2021-10-08T18:01:50.155175", "status": "completed"} tags=[]
# ## Acquire Features Helper
# + papermill={"duration": 0.018645, "end_time": "2021-10-08T18:01:50.195215", "exception": false, "start_time": "2021-10-08T18:01:50.176570", "status": "completed"} tags=[]
def get_features(model=None, dataloader=None, num_features=None):
    """Run every batch through *model* and return the stacked feature matrix.

    Returns a (n_samples, num_features) numpy array on the CPU.
    """
    model.to(DEVICE)
    model.eval()
    # Seed row of zeros keeps torch.cat simple; it is dropped before returning.
    collected = torch.zeros(1, num_features).to(DEVICE)
    for batch in dataloader:
        batch = batch.to(DEVICE)
        with torch.no_grad():
            features = model(batch)
        collected = torch.cat((collected, features.view(-1, num_features)), dim=0)
    return collected[1:].detach().cpu().numpy()
# + [markdown] papermill={"duration": 0.010807, "end_time": "2021-10-08T18:01:50.217030", "exception": false, "start_time": "2021-10-08T18:01:50.206223", "status": "completed"} tags=[]
# ## Obtain and Save Features
# + papermill={"duration": 0.021082, "end_time": "2021-10-08T18:01:50.249104", "exception": false, "start_time": "2021-10-08T18:01:50.228022", "status": "completed"} tags=[]
def save_features():
    """Extract backbone features for 10 augmented passes and save each as .npy."""
    start_time = time()
    images = np.load("../input/pet-finder-images/Images.npy")
    breaker()
    print("Time Taken to Read Data : {:.2f} minutes".format((time()-start_time)/60))
    # breaker()
    # print("Building DataLoaders ...")
    r.seed(SEED)
    # One augmentation seed per pass over the image set.
    seeds = [r.randint(0, 99) for _ in range(10)]
    model_names = ["densenet", "resnet", "vgg"]
    # Feature widths emitted by each backbone, index-aligned with model_names.
    num_features = [1664, 2048, 2048]
    breaker()
    for seed in seeds:
        augment = get_augment(seed)
        # NOTE(review): `images` is overwritten each pass, so augmentations
        # accumulate across seeds instead of starting from the originals --
        # confirm this is intended.
        images = augment(images=images)
        dataloader = build_dataloader(images=images, transform=TRANSFORM_PRE)
        for i in range(len(model_names)):
            print("Model: {}, Seed: {}".format(model_names[i], seed))
            model = build_model(model_name=model_names[i], pretrained=True)
            # breaker()
            # print("Obtaining Features ...")
            features = get_features(model, dataloader, num_features=num_features[i])
            # breaker()
            # print("Saving Features as a .npy File ...")
            np.save("./{}_features_seed_{}.npy".format(model_names[i], seed), features)
        breaker()
# + papermill={"duration": 828.395453, "end_time": "2021-10-08T18:15:38.656268", "exception": false, "start_time": "2021-10-08T18:01:50.260815", "status": "completed"} tags=[]
save_features()
# + papermill={"duration": 0.028937, "end_time": "2021-10-08T18:15:38.706312", "exception": false, "start_time": "2021-10-08T18:15:38.677375", "status": "completed"} tags=[]
breaker()
print("Notebook Run Time : {:.2f} minutes".format((time()-notebook_start_time)/60))
breaker()
| Notebooks - 1/Pretrained Features (Augmented).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# #### Write a function in python that can reverse a string using stack data structure. Use Stack class from the tutorial.
# #### reverse_string("We will conquere COVID-19") should return "91-DIVOC ereuqnoc lliw eW"
s=[]
s.append('We will conquere COVID-19')
s
dir(s)
# +
from collections import deque
class Stack:
    """LIFO stack backed by collections.deque (O(1) push/pop at the right end)."""

    def __init__(self):
        self.container = deque()

    def push(self, val):
        """Place *val* on top of the stack."""
        self.container.append(val)

    def pop(self):
        """Remove and return the top element (IndexError when empty)."""
        return self.container.pop()

    def peek(self):
        """Return the top element without removing it."""
        return self.container[-1]

    def is_empty(self):
        """True when the stack holds no elements."""
        return not self.container

    def size(self):
        """Number of elements currently stored."""
        return len(self.container)
# +
def reverse_string(s):
    """Reverse *s* by pushing each character onto a stack and popping them off."""
    stack = Stack()
    for ch in s:
        stack.push(ch)
    # Popping drains the stack in LIFO order, i.e. reversed input order.
    reversed_chars = []
    while not stack.is_empty():
        reversed_chars.append(stack.pop())
    return ''.join(reversed_chars)


if __name__ == '__main__':
    print(reverse_string("We will conquere COVI-19"))
    print(reverse_string("I am the king"))
# -
# #### Write a function in python that checks if parentheses in the string are balanced or not. Possible parentheses are "{}", "()" or "[]". Use Stack class from the tutorial.
# +
from collections import deque
class Stack:
    """Classic LIFO stack built on a deque for O(1) appends and pops."""

    def __init__(self):
        self.container = deque()

    def push(self, val):
        # The top of the stack is the right end of the deque.
        self.container.append(val)

    def pop(self):
        # Raises IndexError when empty, matching deque semantics.
        return self.container.pop()

    def peek(self):
        # Read-only view of the top element.
        return self.container[-1]

    def is_empty(self):
        return self.size() == 0

    def size(self):
        return len(self.container)
# +
def is_match(ch1, ch2):
    """Return True when closing bracket *ch1* pairs with opening bracket *ch2*."""
    pairs = {')': '(', ']': '[', '}': '{'}
    return pairs[ch1] == ch2
def is_balanced(s):
    """Check that every (, [ and { in *s* is closed in the correct order."""
    stack = Stack()
    openers = '([{'
    closers = ')]}'
    for ch in s:
        if ch in openers:
            stack.push(ch)
        elif ch in closers:
            # A closer with nothing open, or the wrong opener, fails.
            if stack.size() == 0:
                return False
            if not is_match(ch, stack.pop()):
                return False
    # Balanced only if every opener was consumed.
    return stack.size() == 0


if __name__ == '__main__':
    print(is_balanced("({a+b})"))
    print(is_balanced("))((a+b}{"))
    print(is_balanced("((a+b))"))
    print(is_balanced("((a+g))"))
    print(is_balanced("))"))
    print(is_balanced("[a+b]*(x+2y)*{gg+kk}"))
# -
| stack/stack_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# # Building a Full ML Pipeline
#
# Now that we've learned how to use individual parts of scikit-learn, let's put things together to build a full machine learning pipeline to perform facial recognition.
#
# First, we'll download the data and take a look at it.
# +
from sklearn.datasets import fetch_olivetti_faces
people = fetch_olivetti_faces()
# -
print(people.DESCR)
plt.figure(figsize=(12, 9))
for i in range(12):
plt.subplot(3, 4, i+1)
plt.imshow(people.images[i], cmap='gray')
plt.xticks([])
plt.yticks([])
print(people.data.shape)
# Using the full data vector to try to predict the subject's identity would probably not perform very well and would be computationally expensive. We'll preprocess the data by doing a principal component analysis (PCA) to reduce the dimensionality of the data.
#
# PCA projects a high-dimensional vector into a low-dimensional space using Singular Value Decomposition. This effectively decomposes the data into a set of orthogonal components that each attempt to explain the maximum amount of variance.
#
# Let's find some eigenfaces:
# +
from sklearn.model_selection import train_test_split
# Hold out 25% of the faces for evaluation.
X_train, X_test, y_train, y_test = train_test_split(people.data, people.target, test_size=0.25)
# +
from sklearn.decomposition import PCA
# Reduce each 4096-pixel face vector to 150 principal components.
n_components = 150
pca = PCA(n_components=n_components)
pca.fit(X_train)
# Reshape components back to 64x64 images ("eigenfaces") for display.
eigenfaces = pca.components_.reshape((n_components, 64, 64))
# -
plt.figure(figsize=(12, 9))
for i, face in enumerate(eigenfaces[:12]):
plt.subplot(3, 4, i+1)
plt.imshow(face, cmap='gray')
plt.xticks([])
plt.yticks([])
# Each of these eigenfaces is one of the orthogonal basis vectors of our training set. By projecting each face into this basis, we reduce the length of the data vector from 4096 to one with length $n_{components}$. The reverse process is also possible. Let's look at a face reconstructed from its eigenface projection.
# +
# Project the training faces into the eigenface basis, then reconstruct each
# face as a weighted sum of eigenfaces (tensordot over the component axis).
X_train_pca = pca.transform(X_train)
reconstructed_X_train = np.tensordot(X_train_pca, eigenfaces, axes=(-1, 0))
# Side-by-side comparison: original faces on top, reconstructions below.
plt.figure(figsize=(12, 6))
for i, face in enumerate(X_train[:8]):
    plt.subplot(2, 4, i+1)
    plt.imshow(face.reshape((64, 64)), cmap='gray')
    plt.xticks([])
    plt.yticks([])
plt.suptitle('Original', fontsize=32)
plt.show()
plt.figure(figsize=(12, 6))
for i, face in enumerate(reconstructed_X_train[:8]):
    plt.subplot(2, 4, i+1)
    plt.imshow(face, cmap='gray')
    plt.xticks([])
    plt.yticks([])
plt.suptitle('PCA reconstructed', fontsize=32)
plt.show()
# -
# Now, we'll use the lower-dimensional vectors describing the faces in the dataset to train a support vector machine to do the subject classification. A full discussion of SVM is beyond the scope of this tutorial, so we will treat it as a black box classifier. We'll use grid search cross-validation to find the best values of the hyperparameters used in the SVM.
# +
# Project training and test sets into the PCA eigenbasis
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
# Train the SVM classifier using cross-validation to tune parameters
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [1e-4, 5e-4, 1e-3, 5e-3, 0.01, 0.1]}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
# -
# Let's see how well the SVM did. We'll use a confusion matrix to visualize the accuracy of the predictions from the SVM.
# +
from sklearn.metrics import confusion_matrix
plt.figure(figsize=(12, 10))
cm = confusion_matrix(y_test, clf.predict(X_test_pca))
plt.imshow(cm/cm.sum(axis=1)[:, np.newaxis])
plt.colorbar()
plt.xlabel('Predicted label')
plt.ylabel('Actual label')
# -
# Almost perfect!
| sklearn/05_facial_recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://github.com/RomelTorres/av_example/blob/master/Alpha%20vantage%20examples.ipynb
# %matplotlib inline
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
from alpha_vantage.sectorperformance import SectorPerformances
from alpha_vantage.cryptocurrencies import CryptoCurrencies
import matplotlib
import matplotlib.pyplot as plt
import os
# Make plots bigger
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
# # Working with time Series
# We first download the data with our api key.
# Read the Alpha Vantage API key from the environment (never hard-code keys).
API_KEY = os.environ['ALPHAVANTAGE_API_KEY']
ts = TimeSeries(key=API_KEY, output_format='pandas')
# Full intraday history for MSFT at 1-minute resolution.
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
# We can describe it
data.describe()
data.head()
#data.describe()
data['4. close'].plot()
plt.title('Intraday Times Series for the MSFT stock (1 min)')
plt.grid()
plt.show()
# Check the meta data given back by the api call.
meta_data
# ## Getting csv data
#
# https://realpython.com/python-csv/
ts = TimeSeries(key=API_KEY, output_format='csv')
data_csv,_ = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='compact')
data_csv
list(data_csv)
# ## Plotting time series
# Drop the open and volume series so the plot shows the remaining columns.
# FIX: the positional axis argument to DataFrame.drop was deprecated in
# pandas 1.0 and removed in pandas 2.0; use the explicit keyword form.
data = data.drop(columns=['1. open'])
data = data.drop(columns=['5. volume'])
data.plot()
plt.title('Intraday Times Series for the MSFT stock (1 min)')
plt.grid()
plt.show()
type(data)
# # Working with batch quotes
#
# no longer available
# + active=""
# # It is still a TimeSeries call
# ts.output_format='pandas'
# data, meta_data = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))
# data.describe()
# -
data.head(3)
# # Working with technical indicators
ti = TechIndicators(key=API_KEY, output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='60min', time_period=60)
data.describe()
meta_data
# ## Plotting technical indicators
data.plot()
plt.title('BBbands indicator for MSFT stock (60 min)')
plt.grid()
plt.show()
ti = TechIndicators(key=API_KEY, output_format='pandas')
data, meta_data = ti.get_sma(symbol='MSFT')
data.plot()
plt.title('SMA indicator for MSFT stock (daily)')
plt.grid()
plt.show()
# # Working with Sector Performance
sp = SectorPerformances(key=API_KEY, output_format='pandas')
data, meta_data = sp.get_sector()
data.describe()
meta_data
# ## Plotting Sector Performance
data['Rank A: Real-Time Performance'].plot(kind='bar')
plt.title('Real Time Performance (%) per Sector')
plt.tight_layout()
plt.grid()
plt.show()
# # Working with Crypto Currencies
# To just get the current exchange rate
cc = CryptoCurrencies(key=API_KEY)
# To get the intraday price of bitcoin
# +
# I changed the internal format of the the class to be our friendly data frame.
cc.output_format='pandas'
# data, meta_data = cc.get_digital_currency_intraday(symbol='BTC', market='CNY')
data, meta_data = cc.get_digital_currency_daily(symbol='BTC', market='CNY')
data.describe()
# -
data.head(5)
data['4b. close (USD)'].plot()
plt.tight_layout()
plt.title('Daily value for bitcoin (BTC)')
plt.grid()
plt.show()
| docs/examples/alpha-vantage-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import gtts
from IPython.display import display, Audio
from ipywidgets import interact, interact_manual
from time import sleep
def speak(text, lang='en', autoplay=True, echo=False):
    '''
    This will use gTTS to take your text and speak it.

    Parameters: text -- phrase to synthesize; lang -- gTTS language code;
    autoplay -- whether the returned Audio widget starts playing at once;
    echo -- also print the text.

    Example:
    speak("Hello!")
    '''
    speech = gtts.gTTS(text, slow=False, lang=lang)
    # NOTE(review): always saves to a fixed "demo.mp3" in the CWD, so
    # repeated/concurrent calls overwrite each other's audio -- confirm OK.
    speech.save("demo.mp3")
    if echo:
        print(text)
    # IPython Audio widget backed by the file just written.
    return Audio("demo.mp3",autoplay=autoplay)
# Interactive form (text box + language field) with a run button.
@interact_manual(text="hello there", lang="en")
def say_it(text,lang):
    # Build the clip without autoplay, then display the player widget.
    speech = speak(text,lang,autoplay=False)
    # NOTE(review): purpose of the fixed 3 s delay is unclear (gTTS.save is
    # synchronous) -- confirm whether it can be removed.
    sleep(3)
    display(speech)
| lessons/05-Functions/WMC4-Ipython-interact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.tree as tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import graphviz
# %matplotlib inline
credit_df = pd.read_csv('/Users/saptarshighose/Downloads/Machine-Learning/HW2/credit-data.csv')
# # Data Pre-Processing
credit_df['NumberOfDependents'].describe()
credit_df['MonthlyIncome'].describe()
# +
#Chose to fill missing data with mean here because results are not being skewed strongly by outliers as seen above.
#Mean and median are also close -- but median for number of dependents is 0 -- so may be less meaningful in understanding
#more granular context here.
# Impute missing values with the column mean (per the rationale above:
# outliers are mild and the median of NumberOfDependents is 0).
mean_dependents = credit_df.NumberOfDependents.mean()
credit_df['NumberOfDependents'].fillna(mean_dependents, inplace=True)
mean_income = credit_df.MonthlyIncome.mean()
credit_df['MonthlyIncome'].fillna(mean_income, inplace=True)
# -
credit_df.head(5)
credit_df.shape
# # Data Exploration
# ## Correlation Heatmap
import seaborn as sns
# Pairwise correlations between all (numeric) columns, shown as a heatmap.
corr = credit_df.corr()
sns.heatmap(corr,
            xticklabels=corr.columns.values,
            yticklabels=corr.columns.values)
# ## Age by mean number of dependents
# Per-age averages of every column; reused for the three line charts below.
mean_age = credit_df.groupby('age').mean()
x = list(mean_age.index.values)
y = list(mean_age.NumberOfDependents.values)
plt.plot(x, y)
plt.show()
# ## Age by mean number of delinquencies
x = list(mean_age.index.values)
y = list(mean_age.SeriousDlqin2yrs.values)
plt.plot(x, y)
plt.show()
# ## Age by mean number of number of open credit lines and loans
x = list(mean_age.index.values)
y = list(mean_age.NumberOfOpenCreditLinesAndLoans.values)
plt.plot(x, y)
plt.show()
# # Generate features and predictors
# ## Take a categorical variable (age) and create binary/dummy variable (senior citizen or not) from it
# +
def add_dummy_variable(df, var, dummy_var, lambda_equation):
    """Add a binary/dummy column *dummy_var* to *df*.

    Each entry is the result of applying *lambda_equation* element-wise to
    the existing column *var*. The frame is modified in place.
    """
    df[dummy_var] = df[var].map(lambda_equation)
# Flag people older than 65 as senior citizens (True/False column).
add_dummy_variable(credit_df, 'age', 'Senior_Citizen', lambda x: 65 < x )
# -
credit_df.head()
# ## Discretize a continuous variable
# Bin monthly income into 500 equal-width intervals.
credit_df['Discrete_Monthly_Income'] = pd.cut(credit_df['MonthlyIncome'], 500)
# # Build Classifier - KNN
# Drop the engineered columns again before modelling (the interval column
# is not numeric and KNN cannot consume it).
adjust_df = credit_df.drop(['Senior_Citizen', 'Discrete_Monthly_Income'], axis =1)
adjust_df.columns
def data_split(df, var, test_size):
    """Split *df* into random train/test subsets.

    Parameters
    ----------
    df : pandas.DataFrame -- the full feature frame.
    var : str -- name of the outcome column, used as y.
    test_size : float -- fraction of rows held out for testing.

    Returns (x_train, x_test, y_train, y_test).

    NOTE(review): X is the whole dataframe, so the outcome column *var* is
    also present among the features (target leakage) -- this is why the
    training accuracy later in the notebook is 1.0. Kept as-is to preserve
    the notebook's reported results; drop *var* from X to fix it.
    """
    X = df
    Y = df[var]
    # Bug fix: the original shadowed the test_size argument with a
    # hard-coded 0.3, silently ignoring whatever the caller passed.
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=test_size)
    return x_train, x_test, y_train, y_test
x_train, x_test, y_train, y_test = data_split(adjust_df, 'SeriousDlqin2yrs', .3)
# NOTE(review): the original constructed
#   KNeighborsClassifier(n_neighbors=10, metric='minkowski', metric_params={'d': 3})
# here and immediately overwrote it on the next line. That construction was
# dead code and also invalid -- the Minkowski exponent is passed as `p=3`
# (there is no 'd' key), so fitting it would raise an error. The dead line
# is removed; only the classifier that was actually used remains.
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train, y_train)
# Class-membership probabilities for the held-out rows.
knn.predict_proba(x_test)
# Accuracy on train (1.0 here -- see the target-leakage note in data_split)
# and on test.
knn.score(x_train,y_train)
knn.score(x_test,y_test)
# # Write-up
# The first step of building the machine learning pipeline for this assignment was to download and read in the provided data into Jupyter notebook as a pandas dataframe. I then explored the data – including generating distributions of the data, building a correlation heat map of the relevant variables, finding outliers, and summarizing the data. In this process, I noticed that two fields had missing values – specifically the fields NumberOfDependents and MonthlyIncome. I chose to fill in missing data with the mean value of from all other records in the field because the data exploration showed that the results are not being skewed strongly by outliers. The mean and median are also close -- but median for NumberOfDependents is 0 -- so may be less meaningful in understanding more granular context.
#
# In the data exploration phase, I also noticed some interesting points. Specifically, when I built a chart of age by mean number of dependents, I saw that for age 60, people's number of delinquencies is relatively low -- ~.15 delinquencies on average. When I built a chart of age by mean number of open credit lines and loans and looked at the results for age 60, there was an average of ~10 open credit lines. I found this to be an interesting juxtaposition in the data exploration phase.
#
# Then, I proceeded to generate features and predictors – including discretizing the continuous variable 'MonthlyIncome'. I also created a dummy variable here for whether a person is a Senior Citizen -- which is binary for if the person is age 65 or above (then TRUE) else FALSE. After this step, I built the classifier using KNN and the minkowski distance metric. For the classification model, SeriousDlqin2yrs was the outcome variable and the rest of the relevant variables as explanatory-- as specified by the assignment prompt. I played around with the parameters of the model and chose a test size of .3 and 10 nearest neighbors. Finally, I evaluated the classification model and found that the accuracy score on the test data was .897 while it was 1.0 for the training data. Normally I would be concerned about overfitting with the 1.0 score for the training score, but Rayid said that this is acceptable for this homework.
#
| HW2/Machine Learning HW2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FRMOD tutorial
# This notebook is a guide for performing a frequency (or likelihood) ratio style landslide susceptibility analysis with the frmod Python package.
# ## The analysis in brief
# The script uses a probabilistic method for landslide susceptibility assessment. It assumes, that landslide-affected areas in the future will have similar terrain and environmental conditions to the already landslide-affected areas. The inputs of the analysis are the landslide sample areas and the continuous or categorical data layers of the analyzed variables. The method works with raster grids. The analysis has two variations, the frequency ratio and the likelihood ratio.
# The steps of the analysis:
#
# 1. Partition the study area into landslide and non-landslide subareas
# 2. Compute the frequency distribution of the analyzed variables for the landslide, the non-landslide, and the total area
# 3. Compute the ratios (weights)
# - Frequency ratio: Take the ratio of the landslide and total area frequency distributions - *the frequency ratio* - for each analyzed variable
# - Likelihood ratio: Take the ratio of the landslide and non-landslide frequency distributions - *the likelihood ratio* - for each analyzed variable
# 4. Create the **weighted grids**: assign the ratios to the corresponding values of the analyzed variable grids
# 5. Get the landslide **susceptibility grid**: average the **weighted grids**
#
# The results are used to make a landslide susceptibility map and are usually shown in a percentile form on the map. *Percentile is a score below which a given percentage of the values fall.*
#
# The **frmod** package is a tool for performing the frequency (or likelihood) ratio analysis on raster grids. Let's see how it works!
#
# *You can run the code cells by clicking into them and pressing the play button above or shift+enter. Or you can run the whole notebook by pressing the fast forward button above.*
# ## Setting up the analysis
# Import numpy, pyplot, and the necessary modules from the frmod package so you can run the analysis.
# *Set the random seed to get similar results for every run.*
# +
import numpy as np
import matplotlib.pyplot as plt
from frmod.analysis import VRaster, LandslideMask, FRAnalysis, show_grid
# Fix the RNG seed so the random cross-validation splits (and therefore all
# the results below) are reproducible between runs.
np.random.seed(2021)
print("The modules are imported, you are good to go.")
# -
# **The analysis needs two types of input variables:**
# - the layers to be analyzed, these are loaded in as the **VRasters**
# - a mask layer that marks the landslide and non-landslide areas, this is the **LandslideMask**
# ### Importing the data to be analyzed
#
# **VRaster** objects store the data related to the analyzed layers (eg. elevation, slope). You have to specify 4 arguments to create a VRaster for the analysis:
# - **name** : The name of the VRaster. Eg. *'elevation'*
# - **path** : Path to the raster grid of the layer. All rasters used in the analysis must be in the same grid system (resolution, cellsize, projection). The layer must be compatible with GDAL, but this should not be an issue for most of the common raster formats. Eg. *'./data/analyzed_layer.sdat'*
# - **bins** : The non-categorical data will be sorted into this many equal-width categories for the analysis. Eg. *10*
# - **categorical** : Set it True if the layer is categorical and set it False if the layer is not categorical. Eg. *True*
#
# We'll import three layers: elevation, slope, and geology. Geology is a categorical layer, its values are category IDs not quantities.
# Continuous layer: elevation, binned into 50 equal-width classes.
elevation = VRaster(name='elevation',
                    path='./data/SRTM31_EG_GF_m.sdat',
                    bins=50,
                    categorical=False)
# Continuous layer: slope, binned into 20 equal-width classes.
slope = VRaster(name='slope',
                path='./data/SRTM31_EG_GF_Slope_m.sdat',
                bins=20,
                categorical=False)
# Categorical layer: geological units -- category IDs, so no `bins` needed.
geology = VRaster(name='geology_14',
                  path='./data/fdt100_14k.sdat',
                  categorical=True)
# Let's check what we have just imported. The values of the imported layers are stored in the grid property of the VRasters as arrays, eg. *elevation.grid*. The show_grid() is used to plot these grids.
show_grid(elevation.grid, nodata=elevation.nodata, name='Elevation', cmap='terrain')
elevation.show_info()
# ### Importing the landslide sample areas
# **LandslideMask** objects store the data for the mask. The mask is used to separate the landslide and the non-landslide areas. The splits for the cross-validation are also computed here. *The cross-validation will be explained later in this notebook.*
# - **name** : The name of the LandslideMask. Eg. *'scarps'*
# - **path** : Path to the raster grid of the layer. The same conditions apply as for the VRasters. Eg. *'./data/landslide_layer.sdat'*
# - **ls_marker** : The value marking the landslide pixels in the mask layer. Eg. *1*
# - **nls_marker** : The value marking the non-landslide pixels in the mask layer. Eg. *0*
# - **fold_count** : The number of folds for the cross-validation. Eg. *5*
# Landslide mask: pixels equal to ls_marker (1) are landslide area; five
# random folds are prepared for cross-validation. nls_marker is left at its
# default here.
scarps = LandslideMask(name='scarps',
                       path='./data/scarps.sdat',
                       ls_marker=1,
                       fold_count=5)
# Let's plot the mask. Non-landslide areas are blue and landslide areas are red.
show_grid(scarps.grid, nodata=scarps.nodata, name='The sample landslide areas', cmap='seismic', interpolation='none')
# ## Running the analysis
# The **frequency ratio analysis** is performed on a list of **VRasters** and on a **LandslideMask**. These hold the grids of the analyzed data and the landslide mask.
#
# You have to create an **FRAnalysis** object from the rasters and the mask to perform the analysis. This object will store all the data produced during the analysis.
#
# - **ls_mask** : The LandslideMask for the analysis
# - **var_list** : The list of the VRasters to analyze
# Running the analysis computes the frequency ratios and the reclassified
# grids for every variable and every cross-validation fold.
fra = FRAnalysis(ls_mask=scarps,
                 var_list=[slope,
                           geology,
                           elevation]
                 )
# The **frequency ratios** and the **reclassified frequency ratio grids** are computed on the creation of the **FRAnalysis** object. The statistics are stored in the **fr_stats_full** property of the object. The reclassified grids are stored in the **rc_folds** property.
# Let's check the statistics for the slope layer.
fra.fr_stats_full['slope'][0]
# You can also plot the statistics. Change 'slope' to 'elevation' or 'geology_14' to check the other layers.
# NOTE(review): despite the variable name, this line plots 'geology_14',
# not the slope layer.
slope_plot = fra.plot_var_fold_fr('geology_14', 0)
# ### Susceptibility grid
# The susceptibility grid is computed by averaging the reclassified grids. You have to call the get_result function of the FRAnalysis to compute the susceptibility grid and some other related data.
# Let's compute and plot the susceptibility estimates.
result_percentile_bins = fra.get_result()
show_grid(fra.fold_susceptibility[0], nodata=-99999, name="Susceptibility")
# The results are also available in a percentile form. In the percentile form, X means that X% of the pixels in the study area have lower susceptibility than pixels with X susceptibility. Eg. 90% of the pixels have lower susceptibility than pixels in the 90th percentile.
# Plot the percentile grid.
show_grid(fra.fold_percentiles[0], nodata=-99999, name="Susceptibility - percentiles")
# ## Cross-validation
# *Scientists need a way (or more) to test the correctness of their findings. In our case, we'd like to see if the areas with high landslide susceptibility scores are actually prone to landslides or not. To get the most accurate measurement, we'd have to wait several years and evaluate the results with exhaustive ground checks on the field. In the meantime, we have to work with the already available data to get information about the accuracy of the results.*
#
# The **frmod** script uses k-fold cross validation with random splits to evaluate the results.
# 1. The landslide area is split into equal sized parts, called splits.
# 2. One part is attached to the non-landslide area, these are the validation pixels
# 3. The result of the analysis is evaluated by checking the number of validation pixels in the different susceptibility categories
# 4. This process is then repeated with each split
# 5. The final susceptibility estimates are the average of the results of the runs with the different splits
#
# The cross-validation is always used during the analysis. The splits are created with the LandslideMask.
# The results you have just computed were the results with the first split. You can plot the distribution of the validation pixels in the susceptibility categories for the first split.
# To check the results, just run the code below.
# Step plot of the validation-pixel distribution for the first fold,
# rescaled from a proportion to percent on the y axis.
fig, ax = plt.subplots()
plt.title("Susceptibility distribution of validation pixels")
ax.set_xlabel("Susceptibility, percentile")
ax.set_ylabel("Proportion of validation pixels, %")
d = ax.plot(fra.v_dist[0] * 100, drawstyle="steps-mid")
ax.set_xlim(left=0, right=100)
ax.set_ylim(bottom=0)
# The results are considered more accurate when a higher number of validation pixels fall into the higher susceptibility categories. It would be the best if only validation pixels would be in the highest susceptibility categories.
# #### Metrics - Success rate curve
# The cumulative sum of the distribution above is called the success rate curve. It shows the proportion of validation pixels that fall into or below a certain susceptibility category.
# Let's compute and plot this curve for all splits by calling the get_src() and plot_success_rates() function of our FRAnalysis.
# Success rate curves (cumulative validation-pixel distributions), one per
# cross-validation fold.
success_rates = fra.get_src()
fig_sr, ax_sr = fra.plot_success_rates()
# #### Metrics - Area under the success rate curve
# The accuracy of the model can be measured by the area under the cumulative distribution curve. Our aim is to have most of the validation landslide pixels in the high susceptibility categories, ie. to get the area under the cumulative distribution curve as small as possible.
# The area under the curve (AUC) is computed by the get_auc() function of our FRAnalysis class object. The function prints the **AUC** values for all splits along with the mean and the standard deviation of the scores.
auc_folds = fra.get_auc()
# If you would like to experiment with the analysis, you can change some parameters and run it again. Try to get a better score than the current one.
# ## The final result
# The analysis is performed with cross-validation that produces one susceptibility grid for each split. The final result of the analysis is the average of the susceptibility grids, converted into the percentile form. Compute and plot it with the get_percentile_grid(show=True) function.
fra.get_percentile_grid(show=True)
# ## Output
# The frmod package stores the results and the related statistics in the corresponding FRAnalysis object. It is possible to plot or export the created data with a set of functions. This way you can analyze or view the results elsewhere.
#
# frmod's utils module has a set of functions for reading and writing raster grids.
# The **array2raster()** is used to save the 2D arrays (grids) that we produced during the analysis. The function needs 3 parameters + 1 optional parameter:
# - **rasterfn** : Path to the raster used as a sample. The function copies the characteristics (shape, resolution, projection, nodata) of this raster to the new raster
# - **new_raster_fn** : Path and name of the new raster
# - **array** : The array to save
# - **driver** : A GDAL raster driver, eg. *'GTiff'*
#
# Save the final percentile susceptibility grid with it.
# +
import os
from frmod.utils import array2raster
# Create the output folder if it does not exist
if os.path.isdir('output') is False:
    os.makedirs('output')
# Save the percentile susceptibility grid (array) to a georeferenced raster
# (geo-metadata is copied from the slope raster, which shares the grid
# system of every layer in the analysis).
array2raster(rasterfn=slope.path,
             new_raster_fn='./output/percentile_susceptibility.tif',
             array=fra.percentile_grid,
             driver='GTiff'
             )
# -
# The success rate curves can also be saved to a csv file:
# - **folder** : Path to the output folder. The default is "./output/".
# - **fname** : Output filename with extension. The default is "src.csv".
fra.save_src(folder='./output/', fname='success_rates.csv')
# The save_stats function of the FRAnalysis object saves the frequency ratios for each analyzed layer and split to csv files.
# - **folder** : Path to the output folder. The default is "./output/".
# - **tag** : Tag inserted to the beginning of the file name. The default is "".
#
fra.save_stats(tag="notebook_output")
# The statistics with the landslide and non-landslide distributions and the frequency ratios are stored in the fr_stats_full dictionary of the FRanalysis. The keys are the name properties of the VRasters. The items are lists with one pandas.DataFrame for each fold's statistics. You can use the built-in methods of pandas for saving these statistics.
# NOTE(review): this import looks unused -- to_csv below is a DataFrame
# method, not a pandas function.
import pandas as pd
fra.fr_stats_full['slope'][0].to_csv('./output/fr_stats_slope_1.csv')
# Check if the files have been saved to the output folder by listing the files in the folder.
os.listdir('./output')
| frmod_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Data Cleaning
# Category_name, brand_name and item_description have null values, so we process these columns below.
import pandas as pd
# get data
# Sample of the Mercari product listings used throughout the project.
df = pd.read_csv("https://raw.githubusercontent.com/jinchen1036/Product-Price-Prediction/main/data/sample_data.csv",sep=",")
def fill_missing_data(data):
    """Fill the missing values of the three nullable columns of *data*.

    category_name -> 'Other', brand_name -> 'Unknown',
    item_description -> 'No description yet'.
    The frame is modified in place and also returned.
    """
    # Assign the filled columns back instead of fillna(inplace=True) on a
    # selected column: that pattern is chained assignment, which raises a
    # FutureWarning on modern pandas and silently stops mutating the frame
    # under copy-on-write (pandas 3.0).
    data['category_name'] = data['category_name'].fillna('Other')
    data['brand_name'] = data['brand_name'].fillna('Unknown')
    data['item_description'] = data['item_description'].fillna('No description yet')
    return data
import numpy as np
mercaridf = fill_missing_data(df)
print(np.shape(mercaridf))
# Confirm no column still contains nulls after imputation.
mercaridf.isnull().any()
mercaridf.head()
# ### Remove Items with 0 price
# Zero-priced rows carry no signal for price prediction, so drop them.
mercaridf = mercaridf[mercaridf.price != 0]
| experiment/data_preprocess/Data_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab_type="code"
# -*- coding: utf-8 -*-
# + [markdown] colab_type="text"
# ## Runing a small timeseries forecast
#
# Before running a time series forecast, the initial data set has to be generated first via oneliner. The online `four_step_search` combines four types of trend search:
#
# 1. Overtime
# 2. By regions
# 3. By related topics
# 4. By related queries
#
# However, this example is only focusing on the dateframe from *Overtime*.
#
# + colab_type="code"
# !pip install git+https://github.com/AI2Business/ai2business.git
# + colab_type="code"
from ai2business.macros import oneliner
# + [markdown] colab_type="text"
# Hence, search trend for the years "2017", "2018", "2019", and "2020" will be generated and plotted.
#
# ### Note
#
# A dependency between the years is obviously given, even if single event trigger breakouts.
#
# + colab_type="code"
# Google-Trends search for the four year keywords; four_step_search returns
# a dict of dataframes, of which only "interest_over_time" is used here.
keyword_list: list = ["2017", "2018", "2019", "2020"]
timeframe = oneliner.TrendSearch.four_step_search(keyword_list=keyword_list)
timeframe["interest_over_time"].plot()
# + [markdown] colab_type="text"
# And the Pearson-correlation shows the negative linear dependency between the current and previous year.
#
# + colab_type="code"
timeframe["interest_over_time"].corr()
# Drop the metadata column "isPartial" so only the numeric trend columns remain.
dataset = timeframe["interest_over_time"].drop(columns="isPartial")
print(dataset)
# + [markdown] colab_type="text"
# ### Loading the automl modul.
#
# + colab_type="code"
from sklearn.model_selection import train_test_split
from ai2business.ai_engines import automl_neural_network as an
# + [markdown] colab_type="text"
# ### Setup the Timeseries Forecaster.
#
# + colab_type="code"
# Bug fix: sklearn's train_test_split returns (X_train, X_test, y_train,
# y_test); the original unpacked it as (x_train, y_train, x_test, y_test),
# so the held-out features ended up in y_train and the training labels in
# x_test. The unpacking order is corrected here.
x_train, x_test, y_train, y_test = train_test_split(
    dataset.iloc[:, 0:2].values,
    dataset.iloc[:, 3].values,
    test_size=0.33,
    random_state=42,
)
# Build the AutoML pipeline with a time-series forecaster strategy.
context = an.AutoMLPipeline(an.TimeseriesForecaster())
context.run_automl()
# + [markdown] colab_type="text"
# ### Fitting the Timeseries Forecaster.
#
# + colab_type="code"
# Fit the forecaster on the training split (single epoch, batch size 32).
context.train = an.AutoMLFit(x_train, y_train, batch_size=32, epochs=1)
context.run_automl()
# + [markdown] colab_type="text"
# ### Evaluate the Timeseries Forecaster.
#
# + colab_type="code"
# Swap the pipeline step to evaluation and score on the held-out split.
context.train = an.AutoMLEvaluate(x_test, y_test, batch_size=32)
context.run_automl()
| docs/ipynb/timeseries_forecast_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Feature Engineering](https://www.kaggle.com/learn/feature-engineering) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/creating-features).**
#
# ---
#
# # Introduction #
#
# In this exercise you'll start developing the features you identified in Exercise 2 as having the most potential. As you work through this exercise, you might take a moment to look at the data documentation again and consider whether the features we're creating make sense from a real-world perspective, and whether there are any useful combinations that stand out to you.
#
# Run this cell to set everything up!
# +
# Setup feedback system
# Binds this notebook's globals so the q_*.check() graders below can
# inspect the learner's answers.
from learntools.core import binder
binder.bind(globals())
from learntools.feature_engineering_new.ex3 import *
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor
def score_dataset(X, y, model=None):
    """Return the cross-validated RMSLE of *model* on (X, y).

    Parameters
    ----------
    X : pandas.DataFrame -- features; categorical/object columns are
        label-encoded before scoring.
    y : pandas.Series -- target (SalePrice).
    model : estimator, optional -- defaults to a fresh XGBRegressor().
        (The original used ``model=XGBRegressor()`` as the default, which
        is evaluated once at definition time and shared across calls --
        the classic mutable-default pitfall.)

    Returns the Root Mean Squared Log Error (lower is better).
    """
    if model is None:
        model = XGBRegressor()
    # Work on a copy: the factorize() assignments below would otherwise
    # overwrite the caller's categorical columns with integer codes.
    X = X.copy()
    # Label encoding for categoricals
    for colname in X.select_dtypes(["category", "object"]):
        X[colname], _ = X[colname].factorize()
    # Metric for Housing competition is RMSLE (Root Mean Squared Log Error)
    score = cross_val_score(
        model, X, y, cv=5, scoring="neg_mean_squared_log_error",
    )
    score = -1 * score.mean()
    score = np.sqrt(score)
    return score
# Prepare data
df = pd.read_csv("../input/fe-course-data/ames.csv")
X = df.copy()
# pop() removes SalePrice from X, so the target is not among the features.
y = X.pop("SalePrice")
# -
# -------------------------------------------------------------------------------
#
# Let's start with a few mathematical combinations. We'll focus on features describing areas -- having the same units (square-feet) makes it easy to combine them in sensible ways. Since we're using XGBoost (a tree-based model), we'll focus on ratios and sums.
#
# # 1) Create Mathematical Transforms
#
# Create the following features:
#
# - `LivLotRatio`: the ratio of `GrLivArea` to `LotArea`
# - `Spaciousness`: the sum of `FirstFlrSF` and `SecondFlrSF` divided by `TotRmsAbvGrd`
# - `TotalOutsideSF`: the sum of `WoodDeckSF`, `OpenPorchSF`, `EnclosedPorch`, `Threeseasonporch`, and `ScreenPorch`
# +
# YOUR CODE HERE
X_1 = pd.DataFrame()  # dataframe to hold the new mathematical transforms

# Ratio of above-ground living area to lot area.
X_1["LivLotRatio"] = X["GrLivArea"].div(X["LotArea"])
# Average floor area per above-ground room.
X_1["Spaciousness"] = X["FirstFlrSF"].add(X["SecondFlrSF"]).div(X["TotRmsAbvGrd"])
# Combined square footage of all the outdoor spaces.
X_1["TotalOutsideSF"] = (
    X["WoodDeckSF"]
    + X["OpenPorchSF"]
    + X["EnclosedPorch"]
    + X["Threeseasonporch"]
    + X["ScreenPorch"]
)

# Check your answer
q_1.check()
# +
# Lines below will give you a hint or solution code
#q_1.hint()
#q_1.solution()
# -
# -------------------------------------------------------------------------------
#
# If you've discovered an interaction effect between a numeric feature and a categorical feature, you might want to model it explicitly using a one-hot encoding, like so:
#
# ```
# # One-hot encode Categorical feature, adding a column prefix "Cat"
# X_new = pd.get_dummies(df.Categorical, prefix="Cat")
#
# # Multiply row-by-row
# X_new = X_new.mul(df.Continuous, axis=0)
#
# # Join the new features to the feature set
# X = X.join(X_new)
# ```
#
# # 2) Interaction with a Categorical
#
# We discovered an interaction between `BldgType` and `GrLivArea` in Exercise 2. Now create their interaction features.
# +
# YOUR CODE HERE
# One-hot encode BldgType (columns Bldg_*), then scale each indicator
# column row-wise by GrLivArea to form the interaction features.
X_2 = pd.get_dummies(X["BldgType"], prefix="Bldg").mul(X["GrLivArea"], axis=0)

# Check your answer
q_2.check()
# +
# Lines below will give you a hint or solution code
#q_2.hint()
#q_2.solution()
# -
# # 3) Count Feature
#
# Let's try creating a feature that describes how many kinds of outdoor areas a dwelling has. Create a feature `PorchTypes` that counts how many of the following are greater than 0.0:
#
# ```
# WoodDeckSF
# OpenPorchSF
# EnclosedPorch
# Threeseasonporch
# ScreenPorch
# ```
# +
X_3 = pd.DataFrame()
# YOUR CODE HERE
# Count, per dwelling, how many of the five porch/deck areas are non-zero.
porch_columns = [
    "WoodDeckSF",
    "OpenPorchSF",
    "EnclosedPorch",
    "Threeseasonporch",
    "ScreenPorch",
]
X_3["PorchTypes"] = (X[porch_columns] > 0).sum(axis=1)
# Check your answer
q_3.check()
# +
# Lines below will give you a hint or solution code
#q_3.hint()
#q_3.solution()
# -
# # 4) Break Down a Categorical Feature
#
# `MSSubClass` describes the type of a dwelling:
df.MSSubClass.unique()
# You can see that there is a more general categorization described (roughly) by the first word of each category. Create a feature containing only these first words by splitting `MSSubClass` at the first underscore `_`. (Hint: In the `split` method use an argument `n=1`.)
# Preview: splitting once on "_" yields two columns -- the general class
# word and the remainder.
X["MSSubClass"].str.split("_", n=1,expand=True)
# +
X_4 = pd.DataFrame()
# YOUR CODE HERE
X_4["MSClass"] = ( # Create one new feature
    X["MSSubClass"] # from the MSSubClass feature
    .str # through the string accessor
    .split("_", n=1,expand=True)[0] # by splitting once on "_", expanding
    # into two columns, and keeping column 0: the first word of each category
)
# Check your answer
q_4.check()
# -
# Lines below will give you a hint or solution code
# NOTE(review): unlike the other exercise cells, these two calls were left
# uncommented, so the hint and solution print on every run.
q_4.hint()
q_4.solution()
# # 5) Use a Grouped Transform
#
# The value of a home often depends on how it compares to typical homes in its neighborhood. Create a feature `MedNhbdArea` that describes the *median* of `GrLivArea` grouped on `Neighborhood`.
# Preview: each row's neighborhood next to that neighborhood's median
# above-ground living area.
pd.concat([X['Neighborhood'],X.groupby("Neighborhood")["GrLivArea"].transform("median")], axis = 1)
# +
X_5 = pd.DataFrame()
# YOUR CODE HERE
X_5["MedNhbdArea"] = (
    X.groupby("Neighborhood") # for each neighborhood
    ["GrLivArea"] # select the above-ground living area
    .transform("median") # and broadcast its median back to every row
)
# Check your answer
q_5.check()
# +
# Lines below will give you a hint or solution code
#q_5.hint()
#q_5.solution()
# -
# Now you've made your first new feature set! If you like, you can run the cell below to score the model with all of your new features added:
X_new = X.join([X_1, X_2, X_3, X_4, X_5])
score_dataset(X_new, y)
# # Keep Going #
#
# [**Untangle spatial relationships**](https://www.kaggle.com/ryanholbrook/clustering-with-k-means) by adding cluster labels to your dataset.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/221677) to chat with other Learners.*
| Feature Engineering/3 Creating Features/exercise-creating-features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
# # Built-in functions and Libraries (5 min / 5 min exercises)
# **Top 5 things** about built-in functions and libraries.
# #### Learning objectives
# By the end of this notebook you will know:
# + What are built-in functions and libraries.
# + What the main differences between them are.
# + How to interpret errors in python.
#
# These learning goals will be very important to successfully complete assignment 1.
# ## **1. A function may take zero or more arguments**:
# + An _argument_ is a value passed to a function.
# + `len` takes exactly one.
# + `int`, `str`, and `float` take one argument and return a new value.
# + `print` takes zero or more arguments.
# + `print` with no arguments prints a blank line.
# ## **2. Commonly used built-in functions include `max`, `min`, and `round`:**
# + Use `max` to find the higher value of one or more values.
# + Use `min` to find the lower value
# + Both functions work on character strings as well as numbers:
# - "higher" and "lower" use (0-9, A-Z, a-z) to compare letters.
#
# ## **3. Most of the power of a programming language is in its libraries:**
# + A _library_ is a collection of files (called a _module_) that contains functions for use by other programs.
# - It may also contain data values (e.g. numerical) and other things.
# + The Python [standard library](https://docs.python.org/3/library/) is an extensive suite of modules that comes with Python itself.
# + Many additional libraries are available from [PyPI](https://pypi.python.org/pypi/) (the Python Package Index).
#
# **REMEMBER**: A library is a collection of modules, but these terms are often used interchangeably, especially since many libraries only consist of a single module, so don’t worry if you mix them up.
# ## **4. We must import a library module before using the functions within it:**
#
# + Use `import` to load a library module into a program's memory.
# + Then refer to things (e.g., variables or functions) that belong to the module as `module_name.thing_name`.
# + Python uses `.` to mean 'part of'.
import numpy
print('pi is', numpy.pi) # pi is a variable in the numpy module
print('cos(pi) is', numpy.cos(numpy.pi)) # cos is a function in the numpy module
# **REMEMBER**:
# 1. Use `help` to learn about the content of a library module:
help(numpy)
# You can also view the library module's documentation online. For numpy there is a reference: https://numpy.org/doc/stable/reference/index.html
# 2. Create an alias for a library module when importing it to shorten programs.
# + Use import ... as ... to give a library a short alias while importing it.
# + Then refer to items in the library using that shortened name.
import numpy as np
print('cos(pi) is', np.cos(np.pi))
# ## **5. Python reports an error when it can't understand the source of a program:**
# The four statements below are intentionally broken to demonstrate Python's
# error messages: an unterminated string, a missing closing parenthesis, and
# a misspelled variable name (`aege` for `age`). Running this cell raises a
# SyntaxError on the first line -- do not "fix" them.
author = '<NAME>
print("poet"
age = 53
remaining = 100 - aege
# <i style="color:red">EXERCISES - 5 min</i>
# * _1. Run the following python code and think about what each of the print statements in the code below will print._
#
# ```python
# easy_string = "abc"
# print(max(easy_string))
# rich = "gold"
# poor = "tin"
# print(max(rich, poor))
# print(max(len(rich), len(poor)))
# ```
#
# * _1b. Does `max(len(rich), poor)` run or produce an error message? If it runs, does its result make any sense?_
#
#
# * _2. What function from the `numpy` module can you use to calculate a square root without using `sqrt`?_
#
#
# * _3. The following variable stores one of the longest words in the world._
#
# ```python
# longest_word = 'pneumonoultramicroscopicsilicovolcanoconiosis'
# ```
#
# _Suppose you want to select a random character from `longest_word`_:
#
#
# _Which [standard library](https://docs.python.org/3/library/) module could help you?_
#
# + _3.2. Which function would you select from that module? Are there alternatives?_
#
# * _4. Fill in the blanks so that the program below prints 90.0._
#
#
# ```python
# import numpy as np
# angle = ____.degrees(____.pi / 2)
# print(____)
# ```
# * _5. Rewrite the code above so that it uses import without `as`. Which form do you find easier to read?_
| semester2/notebooks/1.3-built-in-functions-and-libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# NOTE: this notebook is Python 2 (print statements, xrange, "rb" csv mode).
import os
import csv
import numpy as np
import math
# Each CSV row is (latitude, longitude, frequency), all read as strings.
lines = csv.reader(open("Webgl_BookCrossing.csv","rb"))
book = [];
for lat,lon,freq in lines:
    book.append([lat,lon,freq])
print book[12479]
# Drop the header row -- note this slice also drops the final data row;
# presumably intentional (trailer line), but worth confirming.
book = book[1:-1]
book_np = np.array(book)
# Third column: the raw frequencies, converted to floats.
book_temp = book_np[:,2]
freq = [float(i) for i in book_temp]
Maxfreq = max(freq)
print Maxfreq
def sigmoid(x):
    # Logistic function: maps any real x into the open interval (0, 1).
    return 1.0 / (1.0 + math.exp(-x))
# NOTE(review): this is not the sigmoid defined above -- the 0.5 offset
# makes the curve range up to 2.0 instead of 1.0; sigmoid() is unused.
logistic = 1.0 / (0.5 + np.exp(-np.array(freq)))
logistic_freq = [float(i) for i in logistic]
print len(logistic_freq),len(book),len(freq)
print min(logistic_freq)
print max(logistic_freq)
def piecewise_linearity(x):
    """Map a raw frequency onto a gentler piecewise-linear scale.

    Three segments join continuously (y = 1.0 at x = 10, y = 2.0 at
    x = 100) with progressively flatter slopes: 1/10, 1/90, 1/1000.
    Non-comparable inputs (e.g. NaN) fall through and return None.
    """
    if x < 10.0:
        return x / 10.0
    if 10.0 <= x < 100.0:
        return (1 / 90.0) * x + 8.0 / 9.0
    if x >= 100.0:
        return (1.0 / 1000.0) * x + 1.90
# Sanity-check a value well inside the flattest segment.
piecewise_linearity(1100)
# Re-map every frequency through the piecewise scale.
linear_map = [piecewise_linearity(i) for i in freq]
len(linear_map)
# +
# Emit the data as a JSON array of the form:
#   [["category", [lat, lon, value, lat, lon, value, ...]], ...]
f = open('BookCrossingData.json','w')
f.write('[')
categories = ['book-crossing']
for i in xrange(len(categories)):
    f.write('["');f.write(categories[i]);f.write('",[')
    for j in xrange(len(book)):
        f.write(book[j][0])
        f.write(',')
        f.write(book[j][1])
        f.write(',')
        f.write(str(linear_map[j]/5))
        # Separate triples with a comma, except after the last one.
        if j != len(book)-1:
            f.write(',')
    f.write(']]')
    # BUG FIX: the original tested `i == len(categories)`, which can never
    # be true inside this loop, so multiple categories would have been
    # emitted with no separating comma (invalid JSON). Separate all but
    # the last category.
    if i != len(categories)-1:
        f.write(',')
f.write(']')
f.close()
# + active=""
# import csv
# lines = csv.reader(open("poverty.csv", "rb"))
# mpis = [] # Multidimensional Poverty Index
# thousands = [] # People, in thousands, in a poverty situation
# deprivations = [] # Intensity of Deprivation
# for lat, lon, mpi, thousand, deprivation in lines:
# mpis += (lat, lon, mpi)
# thousands += (lat, lon, thousand)
# deprivations += (lat, lon, deprivation)
# print """
# [
# ["Multidimensional Poverty Index", [%s]],
# ["People affected (in thousands)", [%s]],
# ["Intensity of Deprivation", [%s]]
# """ % (",".join(mpis),
# ",".join(thousands),
# ",".join(deprivations))
# - See more at: http://versae.blogs.cultureplex.ca/2011/11/07/creating-a-globe-of-data/#comments
| PlotCode/Book_Crossing/globe/generate_json.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sequence
# +
import json
import sys
from konfoo import *
# -
# ## YAML Support
import oyaml as yaml
def to_yaml(container, *attributes, **options):
    """Dump the container's field view to stdout as YAML.

    ``flow_style`` (default False) is consumed from *options*; all other
    keyword options are forwarded to ``view_fields``.
    """
    use_flow = options.pop('flow_style', False)
    fields = container.view_fields(*attributes, **options)
    return yaml.safe_dump(fields, stream=sys.stdout, default_flow_style=use_flow)
# ## Create a new Sequence
# Walk through the konfoo Sequence container API: construction, member
# management, indexing, (de)serialization, and the various export formats.
sequence = Sequence([Byte(), Unsigned8(), Decimal8(), Char()])
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Append a Member
sequence = Sequence()
sequence.append(Unsigned8())
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Insert a Member
sequence.insert(0, Byte())
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Extend a Sequence
sequence.extend([Decimal8(), Char()])
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Initialize a Sequence
# One initial value per member, in declaration order.
sequence.initialize_fields([1, 2, 9, 'F'])
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Display a Sequence
sequence
# ## Metadata of a Sequence
sequence.describe()
json.dump(sequence.describe(), sys.stdout, indent=2)
d3flare_json(sequence.describe(), sys.stdout, indent=2)
# ## Size of a Sequence
# container_size() returns a (bytes, remaining bits) pair.
sequence.container_size()
num_of_bytes, num_of_remaining_bits = sequence.container_size()
num_of_bytes
num_of_remaining_bits
# ## Indexing
sequence.to_list('index')
sequence.to_csv('index.byte', 'index.bit', 'index.address', fieldnames=('id', 'index', 'offset', 'address'))
sequence.to_json('index')
sequence.index_fields(index=Index())
sequence.index_fields()
sequence.to_list('index')
sequence.to_csv('index.byte', 'index.bit', 'index.address', fieldnames=('id', 'index', 'offset', 'address'))
sequence.to_json('index')
# ## De-Serializing
# Parse a 7-byte payload into the four member fields.
sequence.deserialize(bytes.fromhex('01020946f00f00'))
sequence.to_list()
sequence.to_csv()
sequence.to_json()
to_yaml(sequence, flow_style=False)
# ## Serializing
bytestream = bytearray()
bytestream.hex()
sequence.serialize(bytestream)
bytes(sequence).hex()
bytestream.hex()
# ## Sequence Members
# ### Number of Members
len(sequence)
# ### Access a Member
sequence[0]
# ### Access the Attributes of a Member Field
sequence[0].name
sequence[0].value
sequence[0].bit_size
sequence[0].alignment
sequence[0].alignment.byte_size
sequence[0].alignment.bit_offset
sequence[0].byte_order
sequence[0].byte_order.value
sequence[0].index
sequence[0].index.byte
sequence[0].index.bit
sequence[0].index.address
sequence[0].index.base_address
sequence[0].index.update
# ### List the Members
[member.item_type for member in sequence]
# ## Sequence Fields
# ### View Field Attributes
sequence.view_fields()
sequence.view_fields('name', 'bit_size', 'value', 'index', 'alignment', 'byte_order.name')
# ### View as a JSON string
sequence.to_json()
print(sequence.to_json(indent=2))
sequence.to_json('name', 'bit_size', 'value', 'index', 'alignment', 'byte_order')
print(sequence.to_json('name', 'bit_size', 'value', 'index', 'alignment', 'byte_order', indent=2))
# ### Write to a JSON file
sequence.write_json('Sequence.json')
# ### List the Field Items
sequence.field_items()
# ### List the Field Attributes
sequence.to_list()
sequence.to_list('name', 'bit_size', 'value', 'index', 'alignment', 'byte_order')
sequence.to_dict()
sequence.to_dict('name', 'bit_size', 'value', 'index', 'alignment', 'byte_order')
sequence.to_csv()
sequence.to_csv('name', 'bit_size', 'value', fieldnames=('id', 'type', 'size', 'value'))
# ### Write the Field Attributes to a `.csv` File
sequence.write_csv('Sequence.csv', 'name', 'bit_size', 'value', fieldnames=('id', 'type', 'size', 'value'))
# ### Save the Field Attributes to an `.ini` File
sequence.save('Sequence.ini')
# ### Load the Field Value from an `.ini` File
sequence.load('Sequence.ini')
| notebooks/containers/Sequence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="R5ow9ze2RCqP"
# Importing Dataset from Yahoo Finance
# + colab={"base_uri": "https://localhost:8080/", "height": 272} id="_uSNnnis9Tom" outputId="fc2f2387-d473-4dc5-e6be-a04af9954b81"
import pandas_datareader.data as web
from datetime import date
import datetime
# Daily quotes for HDFC Bank from 2006-01-02 up to today.
# NOTE(review): the "yahoo" backend of pandas_datareader has been fragile /
# deprecated upstream -- confirm it still works in this environment.
start = datetime.datetime( 2006, 1, 2)
end = date.today()
stock = "HDFCBANK.NS"
data = web.DataReader( stock,"yahoo", start, end)
print(data)
# + [markdown] id="Ds8Z5DxUR0vt"
# Plotting Training Data : HDFC Stock
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="gS2XfEB2Rz-U" outputId="37c55c60-5d31-4609-dd06-10e5cddc07f1"
import matplotlib.pyplot as plt
# Visualize the adjusted closing price over the full history.
plt.figure(figsize=(18,7))
plt.title("stock price trend")
plt.xlabel("Years")
plt.ylabel("Price")
plt.plot(data['Adj Close'], color= 'blue')
plt.show()
# + [markdown] id="F_NIT7dNRKvy"
# Dividing data into train/test sets; Scaling, Windowing and Reshaping Data
# + id="0HZvzDfvNNTC"
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# Work on adjusted close only, as an (n, 1) column vector.
df = data['Adj Close'].values
df = df.reshape(-1, 1)
timestep = 50
# 80/20 chronological split; the test slice starts `timestep` rows early so
# the first test sample has a full look-back window.
dataset_train = np.array(df[:int(df.shape[0]*0.8)])
dataset_test = np.array(df[int(df.shape[0]*0.8)-timestep:])
scaler1 = MinMaxScaler(feature_range=(0,1))
dataset_train = scaler1.fit_transform(dataset_train)
# NOTE(review): fit_transform here RE-FITS the scaler on the test split
# (data leakage, and scaler1 ends up fitted on test data). transform()
# with the train fit is the usual choice -- confirm before changing, since
# the later inverse_transform calls rely on this fit.
dataset_test = scaler1.fit_transform(dataset_test)
def create_dataset(dataset, timestep):
    """Build supervised learning samples from an (n, 1) series.

    Each sample is the `timestep` values preceding row k; the matching
    target is the value at row k itself.

    Returns:
        (x, y): arrays shaped (n - timestep, timestep) and (n - timestep,).
    """
    windows = []
    targets = []
    for end in range(timestep, dataset.shape[0]):
        windows.append(dataset[end - timestep:end, 0])
        targets.append(dataset[end, 0])
    return np.array(windows), np.array(targets)
x_train, y_train = create_dataset( dataset_train, timestep)
x_test, y_test = create_dataset( dataset_test, timestep)
# Reshape features for LSTM Layer: (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# + [markdown] id="xAvLDOHoe0sE"
# Building Model:
# 1. LSTM :
# Required Hyperparameter Tuning
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="544njmKue4NG" outputId="cdcf1b0f-ece8-45aa-b298-abf3b4b2b8a8"
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout
import os
# Three stacked 96-unit LSTM layers with dropout, ending in a single-unit
# regression head that predicts the next scaled price.
model = Sequential()
model.add(LSTM(units=96, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=96, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=96))
model.add(Dense(units=1))
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mZguTzIzkbis" outputId="e41e1112-ef0b-4856-8945-8da36cbd6c60"
# NOTE(review): `metrics` is conventionally a list (metrics=['mse']);
# a bare string may be rejected by some Keras versions -- verify.
model.compile(loss='mean_squared_error', optimizer='adam', metrics='mse')
model.fit(x_train, y_train, epochs = 50, batch_size=32)
model.save('LSTMmodel.h5')
# + [markdown] id="isdoeCz5fQyP"
# Run on Test Data
# + id="iJNfNms3BfoL"
# Reload the trained model, predict on the held-out window, and map the
# scaled outputs back to price space.
model = load_model('LSTMmodel.h5')
predictions = model.predict(x_test)
predictions = scaler1.inverse_transform(predictions)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="oAluOvynBiG-" outputId="117a8d7b-36bc-48db-b477-1bfa4a5931d3"
# Scaled-space loss on the held-out windows.
test_loss = model.evaluate(x_test, y_test)
# + [markdown] id="DmHEQSaxZYhk"
# Visualizing Results
# + colab={"base_uri": "https://localhost:8080/", "height": 856} id="YHc4ym5XfQZv" outputId="7c0fd3a1-e610-4688-e43a-6f8a88120d99"
# Full-series view: actual prices (blue) with the test-window predictions
# (red) overlaid at their true positions in the timeline.
fig, ax = plt.subplots(figsize=(18,7))
plt.plot(df, color='blue')
ax.plot(range(len(y_train)+timestep, len(y_train)+timestep+len(predictions)), predictions, color='red')
# BUG FIX: the original called `scaler.inverse_transform`, but no `scaler`
# exists anywhere in this notebook -- the fitted MinMaxScaler is `scaler1`.
y_test_scaled = scaler1.inverse_transform(y_test.reshape(-1,1))
# Zoomed view: test-window actuals (blue) vs predictions (red).
fig, ax = plt.subplots(figsize=(18,7))
ax.plot(y_test_scaled, color='blue')
plt.plot(predictions, color='red')
# + [markdown] id="CfIWbIH4S2Fg"
# Predicting in Future
# + id="1kvcFoT9aNdY"
import pandas_datareader.data as web
from datetime import date
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# + [markdown] id="MuPu2ldhYSnH"
# SELECT STOCK TO BE PREDICTED
#
#
# 1. Nifty 50 (^NSEI)
# 2. S&P BSE SENSEX (^BSESN)
# 3. Hindustan Unilever Limited (HINDUNILVR.NS)
# 4. Reliance Industries Limited (RELIANCE.NS)
# 5. Infosys Limited (INFY.NS)
# 6. Tata Consultancy Services Limited (TCS.NS)
# 7. HDFC Bank Limited (HDFCBANK.NS)
#
#
# + id="jyvpxEnYWAO1"
def select_stock(s):
    """Return the Yahoo Finance ticker for menu choice *s* (1-7).

    Raises:
        ValueError: if *s* is not one of the seven supported choices.
            (The original if/elif chain left `stock` unassigned for an
            invalid choice and raised a confusing UnboundLocalError.)
    """
    tickers = {
        1: "^NSEI",
        2: "^BSESN",
        3: "HINDUNILVR.NS",
        4: "RELIANCE.NS",
        5: "INFY.NS",
        6: "TCS.NS",
        7: "HDFCBANK.NS",
    }
    try:
        return tickers[s]
    except KeyError:
        raise ValueError("unknown stock selection: %r" % (s,))
# + [markdown] id="gCHySGnYe2FT"
# Getting Data of selected stock
# + id="nYP3Eb7NS1rO"
def get_data(stock, scaler1):
    """Fetch the last ~84 days of *stock* and return its final 50-step window.

    The window is min-max scaled with *scaler1* (re-fitted per stock).

    Returns:
        (a_input, t_input): the window as a (1, 50) array and as a plain list.
    """
    start = date.today() - datetime.timedelta(days=84)
    end = date.today()
    data = web.DataReader( stock,"yahoo", start, end)
    df1 = data.reset_index()['Adj Close']
    # NOTE(review): df1 stays local here, but plot() below reads a global
    # `df1` -- confirm a global df1 is defined before plot() is called.
    df1 = scaler1.fit_transform(np.array(df1).reshape(-1,1))
    test_data = df1[-50:,:]
    a_input = test_data[len(test_data) - 50:].reshape(1,-1)
    # BUG FIX: the original first did `t_input=list(x_input)`, but no
    # `x_input` exists anywhere, so every call raised NameError before the
    # value was overwritten on the following line.
    t_input = a_input[0].tolist()
    return a_input, t_input
# + [markdown] id="hz6OgohsohQr"
# Prediting Stock Price for next 10 days
# + id="CLakTdwmTI0e"
def predict(a_input, t_input):
    """Roll the LSTM forward 10 steps autoregressively.

    Uses the module-level `model`. `a_input` is the current (1, 50) window;
    `t_input` holds the same values as a growing list. Each prediction is
    appended to the window, which then slides forward by one step.

    Returns a list of 10 one-element lists of scaled predictions.
    """
    output=[]
    steps=50
    i=0
    while(i<10):
        # Once t_input has grown past 50 values, slide the window: drop
        # the oldest value and predict from the latest 50.
        if(len(t_input)>50):
            a_input=np.array(t_input[1:])
            a_input=a_input.reshape(1,-1)
            a_input = a_input.reshape((1, steps, 1))
            yhat = model.predict(a_input, verbose=0)
            t_input.extend(yhat[0].tolist())
            t_input=t_input[1:]
            output.extend(yhat.tolist())
            i=i+1
        else:
            # First iteration: the window is exactly 50 values long.
            a_input = a_input.reshape((1, steps,1))
            yhat = model.predict(a_input, verbose=0)
            t_input.extend(yhat[0].tolist())
            output.extend(yhat.tolist())
            i=i+1
    return output
# + [markdown] id="3OLaYoBgfEPp"
# Plotting the Graph
# + id="rum3zebZBTBP"
import matplotlib.pyplot as plt
def plot(output, scaler1):
    """Plot the last 50 observed (scaled) prices and the 10 forecast days.

    NOTE(review): this reads `df1` from global scope, but get_data() keeps
    its df1 local -- as written this raises NameError unless a global df1
    exists from an earlier notebook run. Verify before relying on it.
    """
    new_day=np.arange(1,51)
    pred_day=np.arange(51,61)
    plt.figure(figsize=(18,7))
    plt.plot(new_day,scaler1.inverse_transform(df1[len(df1)- 50:]))
    plt.plot(pred_day,scaler1.inverse_transform(output))
# + [markdown] id="H82BNIU-Z2Vr"
# Loading model for prediction
# + id="KqHXoJFFZ1by"
from keras.models import load_model
# predict() above reads this module-level `model`.
model = load_model('LSTMmodel.h5')
# + [markdown] id="cIDioAJrZU8k"
# NIFITY 50
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="K178b-Q8ZTXt" outputId="ae4b25c6-c3be-4ce4-83e7-781c2f4595a1"
# NIFTY 50: fetch, forecast 10 days, plot, and print the predicted prices.
nifty = select_stock(1)
a_input, t_input = get_data(nifty, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="qzyazB9pZ78h"
# S&P BSE SENSEX (^BSESN)
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="YiT2M22LZ9on" outputId="55b47849-3c48-4255-8832-223fa99bb5b4"
# S&P BSE SENSEX: fetch, forecast 10 days, plot, and print.
sensex = select_stock(2)
a_input, t_input = get_data(sensex, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="XlqWsUf2bhc1"
# Hindustan Unilever Limited (HINDUNILVR.NS)
#
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="8khFnNZpbpqt" outputId="9722ec46-fdf7-4c0f-f6fb-1e1a7988c645"
# Hindustan Unilever: fetch, forecast 10 days, plot, and print.
hindu = select_stock(3)
a_input, t_input = get_data(hindu, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="FdiOjU0GbqC2"
# Reliance Industries Limited (RELIANCE.NS)
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="zrQxD5N_bttf" outputId="829ccdf7-d2bf-4e74-d0e3-69467c5fc1c4"
# Reliance Industries: fetch, forecast 10 days, plot, and print.
reliance = select_stock(4)
a_input, t_input = get_data(reliance, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="VbCs6BfxbuGB"
# Infosys Limited (INFY.NS)
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="x8fidI6dbwlp" outputId="700979cf-fd51-47cf-eb80-ab43396afa02"
# Infosys: fetch, forecast 10 days, plot, and print.
infy = select_stock(5)
a_input, t_input = get_data(infy, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="b5LqAEsKbw-d"
# Tata Consultancy Services Limited (TCS.NS)
#
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="4oFrB_uQbzhk" outputId="7aa47b80-0aa7-48e3-c507-02156d29af99"
# Tata Consultancy Services: fetch, forecast 10 days, plot, and print.
tcs = select_stock(6)
a_input, t_input = get_data(tcs, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="9Yyezv8nbz5T"
# HDFC Bank Limited (HDFCBANK.NS)
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="hB-PdSj2b0xb" outputId="dfd7137e-3380-4d47-d61f-4137a6aeab3d"
# HDFC Bank: fetch, forecast 10 days, plot, and print.
hdfc = select_stock(7)
a_input, t_input = get_data(hdfc, scaler1)
output = predict(a_input, t_input)
plot(output, scaler1)
print(scaler1.inverse_transform(output))
# + [markdown] id="hCEgQkVfpBEG"
# Saving scaler1 using pickle
# + id="pdlZzARGpHRk"
import pickle
# Persist the fitted scaler so predictions can be un-scaled elsewhere.
scalerfile = 'scaler1.sav'
pickle.dump(scaler1, open(scalerfile, 'wb'))
| Future_Stock_Predictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
import sklearn
import sklearn.metrics.pairwise
import string
# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from nltk import word_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
# -
import nltk
# One-time downloads of the stopword list and the punkt tokenizer models.
nltk.download('stopwords')
nltk.download('punkt')
def process_text(text, stem=True):
    """Tokenize *text*, optionally Porter-stemming each token.

    Punctuation characters are removed before tokenization.
    """
    # BUG FIX: str.translate() takes a translation table, not a string of
    # characters, so the original `text.translate(string.punctuation)` was
    # a silent no-op in Python 3. Build a deletion table instead.
    text = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(text)
    if stem:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(t) for t in tokens]
    return tokens
# +
# Read and normalize each description: lowercase, strip the "[comma]"
# placeholder, drop every character outside [0-9a-z], collapse whitespace.
descriptions = []
with open('descriptions.txt', encoding = "utf8") as f:
    for line in f:
        text = line.lower() ## Lowercase all characters
        text = text.replace("[comma]"," ") ## Replace [commas] with empty space
        # Iterating the ORIGINAL string while rebinding `text` is safe: the
        # loop walks the string object captured when the loop started.
        for ch in text:
            if ch < "0" or (ch < "a" and ch > "9") or ch > "z": ## The cleaning operation happens here, remove all special characters
                text = text.replace(ch," ")
        text = ' '.join(text.split()) ## Remove double spacing from sentences
        descriptions.append(text)
dataSet = numpy.array(descriptions)
# +
# TF-IDF vectorize the descriptions, then compute pairwise cosine similarity.
vectorizer = TfidfVectorizer(stop_words='english')
TfIdf_dataSet = vectorizer.fit_transform(dataSet)
print("What our Tf-Idf looks like: ")
print()
print(TfIdf_dataSet[0:1])
# NOTE(review): _validate_vocabulary is a private sklearn API and may break
# across versions -- confirm this call is actually needed.
vectorVocab = vectorizer._validate_vocabulary()
# -
cosineSimilarity = sklearn.metrics.pairwise.cosine_similarity(TfIdf_dataSet)
print(cosineSimilarity)
# Setting the diagonal to 1.1 makes each document sort FIRST in its own row.
numpy.fill_diagonal(cosineSimilarity,1.1)
# Negate so argsort yields descending similarity order.
cosineSimilaritySorted = numpy.argsort((-1*(cosineSimilarity)),axis=1)
print(cosineSimilaritySorted)
cosineSimilaritySorted = numpy.argsort((-1*(cosineSimilarity)),axis=1)
# NOTE(review): columns 0:5 therefore INCLUDE the document itself in
# column 0; if "5 most similar OTHER documents" was intended, slice [:,1:6].
top5similar = (cosineSimilaritySorted[:,0:5])
print()
print(top5similar)
numpy.savetxt("results.csv", top5similar.astype(int), fmt='%i', delimiter=",")
| Old/.ipynb_checkpoints/Group assignment after tokenizing cleaned data_latest -checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
tf.__version__
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append("..")
import cv2
import numpy as np
from glob import glob
from models import Yolov4
# Load YOLOv4 with pretrained Darknet weights and COCO class names, then
# run a single-image smoke-test inference.
model = Yolov4(weight_path='../yolov4.weights',
               class_name_path='../class_names/coco_classes.txt')
model.predict('../img/street.jpeg', random_color=True)
| notebook/Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.family']='SimHei' # render Chinese characters in plots
# %matplotlib inline
# -
import warnings
warnings.filterwarnings('ignore')
# ## Data loading and exploration
# Load in the train datasets
train = pd.read_csv('input/train.csv', encoding = "utf-8", dtype = {'type': np.int32})
test = pd.read_csv('input/test.csv', encoding = "utf-8")
submission = pd.read_csv('input/submission.csv', encoding = "utf-8", dtype = {'type': np.int32})
train.head(3)
test.head(3)
submission.head(3)
# ### One-hot Encoding
df1 = pd.get_dummies(train['屬種'])
df1.sample(5)
# ### LabelEncoding
df2 = train['屬種'].replace({'Iris-setosa':1,'Iris-versicolor':2,'Iris-virginica':3})
df2.sample(5)
# ## Data cleaning - handling missing values
#missing data
miss_sum = train.isnull().sum().sort_values(ascending=False)
miss_sum
# Show which rows contain the missing values
print(train[train['花萼寬度'].isnull()])
print("--------------------------------")
print(train[train['花萼長度'].isnull()])
# Drop NaN rows outright (acceptable when only a few rows are affected)
train_d_na = train.dropna().reset_index(drop=True)
train_d_na.isnull().sum().sort_values(ascending=False)
# +
# Fill missing sepal-width values with the column mean
#train.loc[train['花萼寬度'].isnull(),['花萼寬度']] = train['花萼寬度'].mean() # sepal width: column 2
train[['花萼寬度']] = train[['花萼寬度']].fillna(np.mean(train[['花萼寬度']]))
train.plot(kind='line',y='花萼寬度',figsize=(10,6),fontsize=14,title='花萼寬度')
# +
# Fill missing sepal-length values with the column mode
#train.loc[train['花萼長度'].isnull(),['花萼長度']] = train['花萼長度'].mode()[0] # sepal length: column 1
train[['花萼長度']] = train[['花萼長度']].fillna(train['花萼長度'].mode()[0])
train.plot(kind='line',y='花萼長度',figsize=(10,6),fontsize=14,title='花萼長度')
# -
from pandas.plotting import scatter_matrix
scatter_matrix( train[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']],figsize=(10, 10),color='b')
# ## Correlation analysis
corr = train[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']].corr()
print(corr)
# +
import seaborn as sns
plt.rcParams['font.family']='DFKai-SB' # render Chinese characters in plots
plt.figure(figsize=(10,10))
sns.heatmap(corr, square=True, annot=True, cmap="RdBu_r") #center=0, cmap="YlGnBu"
#sns.plt.show()
# http://seaborn.pydata.org/tutorial/color_palettes.html
# -
# ## Outlier analysis
# +
#train[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']]
# Box plots: train set on the top row, test set on the bottom, one column
# per feature, to compare the two distributions.
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(10, 10), sharey=True)
axes[0, 0].boxplot(train['花萼寬度'],showmeans=True)
axes[0, 0].set_title('訓:花萼寬度')
axes[0, 1].boxplot(train['花瓣寬度'],showmeans=True)
axes[0, 1].set_title('訓:花瓣寬度')
axes[0, 2].boxplot(train['花瓣長度'],showmeans=True)
axes[0, 2].set_title('訓:花瓣長度')
axes[0, 3].boxplot(train['花萼長度'],showmeans=True)
axes[0, 3].set_title('訓:花萼長度')
axes[1, 0].boxplot(test['花萼寬度'],showmeans=True)
axes[1, 0].set_title('測:花萼寬度')
axes[1, 1].boxplot(test['花瓣寬度'],showmeans=True)
axes[1, 1].set_title('測:花瓣寬度')
axes[1, 2].boxplot(test['花瓣長度'],showmeans=True)
axes[1, 2].set_title('測:花瓣長度')
axes[1, 3].boxplot(test['花萼長度'],showmeans=True)
axes[1, 3].set_title('測:花萼長度')
# -
train.plot(kind='bar',y='花萼寬度',figsize=(30,6),fontsize=14,title='花萼寬度')
# Tukey fences on sepal width.
#IQR = Q3-Q1
IQR = np.percentile(train['花萼寬度'],75) - np.percentile(train['花萼寬度'],25)
#outlier = Q3 + 1.5*IQR , or. Q1 - 1.5*IQR
train[train['花萼寬度'] > np.percentile(train['花萼寬度'],75)+1.5*IQR]
#outlier = Q3 + 1.5*IQR , or. Q1 - 1.5*IQR
train[train['花萼寬度'] < np.percentile(train['花萼寬度'],25)-1.5*IQR]
# +
#fix_X = X.drop(X.index[[5,23,40]])
#fix_y = y.drop(y.index[[5,23,40]])
# -
# ## Split the data (carved out of the official training data)
# Drop the demo class `type == 4` so it doesn't interfere with modeling
train = train[train['type']!=4]
# +
from sklearn.model_selection import train_test_split
X = train[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']]
y = train['type']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=100)
# -
# ## Standardization
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit on the training split only, then apply the same fit to both splits.
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
X_train_std[0:5]
y_test[0:5]
# ## Building baseline models
# ## KNN
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn = KNeighborsClassifier(n_neighbors=3, weights='uniform')
knn.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, knn.predict(X_test_std)))
print(metrics.confusion_matrix(y_test, knn.predict(X_test_std)))
# -
# ## Random Forest
# +
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=500, criterion='gini', max_features='auto', oob_score=True)
rfc.fit(X_train, y_train) # no standardization (trees are scale-invariant)
print("oob_score(accuary):",rfc.oob_score_)
print(metrics.classification_report(y_test, rfc.predict(X_test)))
# -
# ## Naive Bayes classifier
# +
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, gnb.predict(X_test_std)))
print(metrics.confusion_matrix(y_test, gnb.predict(X_test_std)))
# -
# ## SVM
# +
from sklearn.svm import SVC
svc = SVC(C=1.0, kernel="rbf", probability=True)
svc.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, svc.predict(X_test_std)))
print(metrics.confusion_matrix(y_test, svc.predict(X_test_std)))
# -
# ## Stacking
# website: http://rasbt.github.io/mlxtend/
# +
#from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
import xgboost as xgb
# Four base learners stacked under an XGBoost meta-classifier.
clf1 = KNeighborsClassifier(n_neighbors=3, weights='uniform')
clf2 = RandomForestClassifier(n_estimators=500, criterion='gini', max_features='auto', oob_score=True)
clf3 = GaussianNB()
clf4 = SVC(C=1.0, kernel="rbf", probability=True)
meta_clf = xgb.XGBClassifier(n_estimators= 2000, max_depth= 4)
stacking_clf = StackingClassifier(classifiers=[clf1, clf2, clf3, clf4], meta_classifier=meta_clf)
# NOTE(review): the random forest is trained on the UNscaled features while
# the stack itself is fed standardized data -- confirm that is intended.
clf1.fit(X_train_std, y_train)
clf2.fit(X_train, y_train)
clf3.fit(X_train_std, y_train)
clf4.fit(X_train_std, y_train)
stacking_clf.fit(X_train_std, y_train)
print('KNN Score:',clf1.score(X_test_std, y_test))
print('RF Score:',clf2.score(X_test, y_test))
print('GNB Score:',clf3.score(X_test_std, y_test))
print('SVC Score:',clf4.score(X_test_std, y_test))
print('Stacking Score:',stacking_clf.score(X_test_std, y_test))
# -
# ## XGBoost
#
# 詳細說明:
# <p>(ENG) https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/</p>
# <p>(CHT) http://www.itread01.com/articles/1476146171.html</p>
# +
import xgboost as xgb
gbm = xgb.XGBClassifier(n_estimators= 2000, max_depth= 4).fit(X_train, y_train)
print(metrics.classification_report(y_test, gbm.predict(X_test)))
print("Score:", gbm.score(X_test, y_test))
# -
print(gbm.feature_importances_)
from xgboost import plot_importance
plot_importance(gbm, )
plt.show()
pred = gbm.predict(test[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']])
pred
# Generate Submission File
StackingSubmission = pd.DataFrame({ 'id': submission.id, 'type': pred })
StackingSubmission.to_csv("submission.csv", index=False)
submission = pd.read_csv('submission.csv', encoding = "utf-8", dtype = {'type': np.int32})
submission
test[20:30]
# ## Comparing predictions on the test dataset
# Scale the test set with the scaler fitted earlier on the training set
test_std = sc.transform(test[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']])
submission_stk = stacking_clf.predict(test_std)
submission_stk
submission_rfc = rfc.predict(test[['花瓣寬度','花瓣長度','花萼寬度','花萼長度']])
submission_rfc
submission_knn =knn.predict(test_std)
submission_knn
submission_gnb = gnb.predict(test_std)
submission_gnb
submission_svc = svc.predict(test_std)
submission_svc
# +
# Hard-voting ensemble; the SVC's vote is weighted 4x.
from sklearn.ensemble import VotingClassifier
clf1 = knn
clf2 = rfc
clf3 = gnb
clf4 = svc
eclf = VotingClassifier(estimators=[('knn', clf1), ('rfc', clf2),('gnb', clf3),('svc',clf4)], voting='hard', weights=[1, 1, 1, 4])
eclf.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, eclf.predict(X_test_std)))
# -
submission_eclf = eclf.predict(test_std)
submission_eclf
| kaggle_intro/kaggle_intro_iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="B8HaB1kI42fl" colab_type="code" outputId="5b78382c-56e8-4e88-f8b6-463ca3a3b8a0" executionInfo={"status": "ok", "timestamp": 1583698762286, "user_tz": 0, "elapsed": 24689, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 676}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="PTzy-u1BQowa" colab_type="code" outputId="b0d6d723-88b9-483c-cd24-0056327bbadd" executionInfo={"status": "ok", "timestamp": 1583698877632, "user_tz": 0, "elapsed": 9527, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 172}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
import eli5
from eli5.sklearn import PermutationImportance
from hyperopt import hp, fmin, tpe, STATUS_OK
# + id="4YywDBOvSTEJ" colab_type="code" outputId="6eeb81ac-4449-4616-d726-dc819dda4eb2" executionInfo={"status": "ok", "timestamp": 1583698982455, "user_tz": 0, "elapsed": 1689, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
# + id="qYwH5eurSWO-" colab_type="code" outputId="1f914567-9f7d-4d4a-a8b5-3b69c4a201c8" executionInfo={"status": "ok", "timestamp": 1583698998739, "user_tz": 0, "elapsed": 13330, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 70}
# !ls
# + [markdown] id="OdLJD5877K8s" colab_type="text"
# ## Reading data
# + id="PyPSlRkiUM2H" colab_type="code" outputId="c45b46c1-43fc-4329-ed10-8f8a9001ad90" executionInfo={"status": "ok", "timestamp": 1583699008921, "user_tz": 0, "elapsed": 8473, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Load the scraped car-offers table.
df = pd.read_hdf('data/car.h5')
df.shape
# + id="PaH2QWxnXDFU" colab_type="code" colab={}
#df.columns.values
# + [markdown] id="xSj29Y6Z73zm" colab_type="text"
# ## Dummy Model
# + id="tKfloDq7IPWk" colab_type="code" outputId="afbab402-dc5d-4d9e-b24d-2330cb56f9b5" executionInfo={"status": "ok", "timestamp": 1583684971279, "user_tz": 0, "elapsed": 1006, "user": {"displayName": "<NAME>015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Baseline: DummyRegressor predicts the mean price regardless of input;
# its MAE is the score every real model must beat.
feats = ['car_id']
X = df[ feats ].values
y = df['price_value'].values
model = DummyRegressor()
model.fit(X, y)
y_pred = model.predict(X)
mae(y, y_pred)
# + id="efqTOvhO_nc6" colab_type="code" outputId="076c1d5a-ea65-46c8-978e-1139d2ae00eb" executionInfo={"status": "ok", "timestamp": 1583699018761, "user_tz": 0, "elapsed": 1433, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Remove prices in currencies different than PLN so the target is
# denominated in a single currency.
df = df[ df['price_currency'] == 'PLN' ]
df.shape
# + id="PQs7i4Wa_3Su" colab_type="code" colab={}
SUFIX_CAT = '_cat'
# Label-encode every non-list column: columns already carrying the _cat
# suffix are re-factorized in place; all others get a new <name>_cat column.
for feat in df.columns:
    if isinstance(df[feat][0], list): continue
    factorized_values = df[ feat ].factorize()[0]
    if SUFIX_CAT in feat:
        # BUG FIX: the original assigned `factorized_value` (missing the
        # trailing "s"), a name that does not exist, so this branch raised
        # NameError whenever a *_cat column was present.
        df[feat] = factorized_values
    else:
        df[feat + SUFIX_CAT] = factorized_values
# + id="cTzyAAmxAW4-" colab_type="code" outputId="9c67c53d-e31a-4423-e880-2e0ad1c6ffcf" executionInfo={"status": "ok", "timestamp": 1583699176793, "user_tz": 0, "elapsed": 867, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Keep every encoded feature except anything price-related (the target).
cat_feats = [x for x in df.columns if SUFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)
# + id="rWX5VX-0E0Wl" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate *model* on the global `df` using *feats* as inputs.

    Returns:
        (mean, std) of the 3-fold negative-MAE scores
        (values closer to zero are better).
    """
    features = df[ feats ].values
    target = df['price_value'].values
    fold_scores = cross_val_score(
        model, features, target, cv=3, scoring='neg_mean_absolute_error'
    )
    return np.mean(fold_scores), np.std(fold_scores)
# + id="Qc5pgDNnPrvy" colab_type="code" colab={}
#Decision tree
# + id="NtD3-HIPPn8m" colab_type="code" outputId="d9705841-bac7-493a-997a-7c5c16c3643a" executionInfo={"status": "ok", "timestamp": 1583689520986, "user_tz": 0, "elapsed": 4706, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Shallow decision tree as the first real model.
run_model(DecisionTreeRegressor(max_depth=5), cat_feats)
# + id="r5Z4jQpyQRvM" colab_type="code" outputId="cf34b498-200c-45f0-92f8-c3776b158735" executionInfo={"status": "ok", "timestamp": 1583689886420, "user_tz": 0, "elapsed": 100893, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
#Random Forest
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(model, cat_feats)
# + id="WwXY3myxRRVG" colab_type="code" outputId="c44bc508-80b4-47fb-9176-7f47810e5107" executionInfo={"status": "ok", "timestamp": 1583690336601, "user_tz": 0, "elapsed": 60716, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 87}
#XGBoost
xgb_params = {
'max_depth':5,
'n_estimators':50,
'learning_rate':0.1,
'seed':0,
}
model = xgb.XGBRegressor(**xgb_params)
run_model(model, cat_feats)
# + id="cIJ6OcquFw0Y" colab_type="code" outputId="be6e679a-6bd6-400e-afa1-7dda01e00890" executionInfo={"status": "ok", "timestamp": 1583690726221, "user_tz": 0, "elapsed": 356310, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 401}
xgb_params = {
'max_depth':5,
'n_estimators':50,
'learning_rate':0.1,
'seed':0,
}
m = xgb.XGBRegressor(**xgb_params)
m.fit(X, y)
imp = PermutationImportance(m, random_state=0).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)
# + id="gPcyZTEZS9tn" colab_type="code" outputId="95331e99-902d-4a60-d51c-b8248f8f9238" executionInfo={"status": "ok", "timestamp": 1583699201671, "user_tz": 0, "elapsed": 1013, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 141}
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x)=='None' else int(x) )
df['param_rok-produkcji'].unique()
# + id="gpyJWGNPbzno" colab_type="code" outputId="6778615f-163b-4bb4-f034-5b7ae77a04da" executionInfo={"status": "ok", "timestamp": 1583692597234, "user_tz": 0, "elapsed": 1021, "user": {"displayName": "<NAME>015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 230}
df['param_moc']
# + id="QScAYsbmZzin" colab_type="code" colab={}
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x)=='None' else str(x).split(' ')[0])
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x)=='None' else str(x).split('cm')[0].replace(' ',''))
# + id="Iby_fMQiUztN" colab_type="code" colab={}
feats = ['param_napęd_cat', 'param_rok-produkcji', 'param_stan_cat', 'param_skrzynia-biegów_cat', 'param_faktura-vat_cat', 'param_moc',
'param_marka-pojazdu_cat','param_typ_cat', 'feature_kamera-cofania_cat', 'param_pojemność-skokowa', 'seller_name_cat', 'param_kod-silnika_cat',
'feature_wspomaganie-kierownicy_cat', 'feature_asystent-pasa-ruchu_cat', 'feature_regulowane-zawieszenie_cat',
'feature_system-start-stop_cat', 'feature_światła-led_cat']
# + id="YlhK3BKaW_Eb" colab_type="code" outputId="082b3556-901d-4da0-c7be-ced36dd76d3a" executionInfo={"status": "ok", "timestamp": 1583699442872, "user_tz": 0, "elapsed": 13231, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}} colab={"base_uri": "https://localhost:8080/", "height": 87}
xgb_params = {
'max_depth':5,
'n_estimators':50,
'learning_rate':0.1,
'seed':0,
}
model = xgb.XGBRegressor(**xgb_params)
run_model(model, feats)
# + id="rAPwKElvc3p5" colab_type="code" colab={}
def obj_func(params):
    """Hyperopt objective: evaluate an XGBoost regressor built from the
    sampled hyper-parameters and return its mean absolute error as the loss.
    """
    print('Training with params: ')
    print(params)
    # Bug fix: build the model from the sampled `params`, not the global
    # `xgb_params` — otherwise every hyperopt trial evaluates the same
    # fixed configuration and the search is meaningless.
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    # run_model returns negative MAE, so take abs() for a minimizable loss.
    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}
# + id="GVE-lzDk36Tw" colab_type="code" colab={}
#space
xgb_reg_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
'subsample': hp.quniform('subsample', 0.05, 1, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.05, 1, 0.05),
'objective': 'reg:squarederror',
'n_estimators': 100,
'seed': 0,
}
# + id="wEuZtGZ57oQd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="d4b65e66-4825-4529-f9a1-466d0bec2af9" executionInfo={"status": "ok", "timestamp": 1583702126050, "user_tz": 0, "elapsed": 112507, "user": {"displayName": "<NAME>\u015bniak", "photoUrl": "", "userId": "11771994214049506191"}}
#run
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=10)
best
| day5_HyperOpt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DrugShot
# language: python
# name: drugshot
# ---
# # DrugShot
# DrugShot searches PubMed for articles that co-mention any search term and terms that describe drugs.
#
# It then prioritizes these drugs using various methods, and predicts additional drugs based on shared properties among drugs and other small molecules.
#
# #%%appyter init
from appyter import magic
magic.init(lambda _=globals: _())
# +
import os
import sys
import zipfile
import datetime
import pandas as pd
import numpy as np
# Display / graphing
from IPython.display import display, HTML
import plotly.express as px
# API access
import requests
import io
import time
# +
# Notebook display util functions
def make_clickable(link):
    """Return an HTML anchor tag that opens *link* in a new browser tab."""
    return '<a target="_blank" href="{0}">{0}</a>'.format(link)
table_number = 0
figure_number = 0
def figure_header(label,title):
    """Render a bold section header; bare 'Table'/'Figure' labels are
    auto-numbered by incrementing the module-level counters."""
    global table_number, figure_number
    if label == 'Table':
        table_number += 1
        label = f'Table {table_number}'
    elif label == 'Figure':
        figure_number += 1
        label = f'Figure {figure_number}'
    markup = f"<div style='font-size:2rem; padding:1rem 0;'><b>{label}</b>: {title}</div>"
    display(HTML(markup))
def figure_legend(label,title,content=''):
    """Render a centered caption under the most recent table/figure.

    Reads the current counters without incrementing them, so it pairs with
    the preceding figure_header() call.
    """
    global table_number, figure_number
    if label == 'Table':
        label = f'Table {table_number}'
    elif label == 'Figure':
        label = f'Figure {figure_number}'
    caption = f'<style>div.caption {{text-align: center;}}</style><div class=caption><b>{label}</b>: <i>{title}</i>. {content} </div>'
    display(HTML(caption))
# +
# %%appyter hide_code
{% do SectionField(name='section1',
title = '1. Submit Your Biomedical Term of Interest:')%}
{% do SectionField(name='section2',
title = '2. Chooose Number of Top Associated Drugs to Make Predictions:')%}
# +
# %%appyter hide_code
{% set term = StringField(name='input_term',
label='Biomedical Term',
default='Lung cancer',
description='Input your biomedical term of interest.',
section = 'section1') %}
{% set set_size = IntField(name = 'set_size',
label = 'Associated drug set size',
min = 50,
max = 200,
default = 50,
description = 'Size of drug set used for predictions of additional compounds',
section = 'section2') %}
# -
# ### Load DrugRIF
DrugRIF = pd.read_csv('https://appyters.maayanlab.cloud/storage/DrugShot/DrugRIF.tsv.gz',sep = '\t', usecols = ['name','PMID']).set_index('name')
# ### Query Term Using PubMed Eutils API
# +
# %%appyter code_exec
# Page through PubMed esearch results for the search term, collecting every
# matching PMID into `results`.
i = 0
# NOTE(review): {{term}} is interpolated unencoded into the URL — spaces etc.
# rely on requests/NCBI leniency; consider urllib.parse.quote.
pubmed_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term={}&retmax=100000&retstart={}&retmode=json"
results = []
# First request is used to learn the total hit count (its idlist is not
# added here; the loop re-fetches page 0).
res = requests.get(pubmed_url.format({{term}}, i)).json()
while i <= int(res['esearchresult']['count']):
    results.extend(requests.get(pubmed_url.format({{term}},i)).json()['esearchresult']['idlist'])
    # NOTE(review): `res` is never refreshed, so the step is always the first
    # page's length. That works because every page except the last is
    # retmax-sized, but the `<=` bound can issue one extra (empty) request.
    i += len(res['esearchresult']['idlist'])
    time.sleep(0.2)  # stay under NCBI's request-rate limit
# -
# ### Associated Drug Table
# +
# Retrieve top 500 associated drugs from DrugRIF based on search term co-mentions in the literature
df_associated = pd.DataFrame(DrugRIF[DrugRIF['PMID'].isin(results)].index.value_counts())\
.rename(columns = {'name':'Publications with Search Term'})[0:500]

# Total publication count per chemical, computed once with value_counts (O(n))
# instead of calling DrugRIF.index.tolist().count(chemical) per row, which
# rebuilt the full list and scanned it for every chemical (O(n^2)).
total_publications = DrugRIF.index.value_counts()
chemical_counts = [total_publications[chemical] for chemical in df_associated.index]

# Fraction of each chemical's literature that co-mentions the search term,
# and a combined score (raw count * fraction) used for ranking.
df_associated['Publications with Search Term / Total Publications'] = df_associated.div(chemical_counts, axis=0)
df_associated['Score'] = df_associated['Publications with Search Term'] * df_associated['Publications with Search Term / Total Publications']
# +
# %%appyter code_exec
associated_table = df_associated.sort_values(by = ['Publications with Search Term'], ascending = False)
associated_table.to_csv({{term}}.replace(' ','_')+'_associated_drug_table.csv')
figure_header('Table', 'Top Associated Compounds ({})'.format(make_clickable({{term}}.replace(' ','_')+'_associated_drug_table.csv')))
display(associated_table[associated_table.columns[0:2]].head(20))
figure_legend('Table', 'Top 20 Drugs associated with '+{{term}})
# -
# ### Scatter Plot of Drug Frequency in Literature
# %%appyter code_exec
fig = px.scatter(df_associated.reset_index().rename(columns = {'index':'chemical'}),
x = 'Publications with Search Term',
y = 'Publications with Search Term / Total Publications',
hover_name = 'chemical',
title={{term}})
fig.show()
# +
# %%appyter markdown
### Predicted DrugRIF Co-occurrence Table
The top {{ set_size }} drugs from the associated drug table are ranked by the product of (Publications with Search Term) * (Publications with Search Term / Total Publications) and treated as an unweighted drug set.
Predicted compounds are computed based on average co-occurrence counts of PubMed IDs between the unweighted drug set and other drugs & small molecules within DrugRIF.
# -
# Load cooccurrence matrix into pandas DataFrame
df_cooccurence = pd.read_csv('https://appyters.maayanlab.cloud/storage/DrugShot/DrugRIF_cooccurence_matrix.tsv.gz',
sep = '\t',
index_col = 0)
# %%appyter code_exec
# Calculate average co-occurrence for each drug with the associated drug set
df_cooccurence = df_cooccurence.loc[df_cooccurence.index.isin(associated_table.index[0:{{set_size}}])]
df_cooccurence.drop(columns = associated_table.index[0:{{set_size}}].tolist(), inplace = True)
df_cooccurence.loc['Score'] = df_cooccurence[df_cooccurence.columns].mean()
df_cooccurence.sort_values(by = ['Score'], axis = 1, ascending = False, inplace = True)
# %%appyter code_exec
predicted_table = pd.DataFrame(df_cooccurence.loc['Score'][0:200])
predicted_table.to_csv({{term}}.replace(' ','_')+'_cooccurence_similarity_predicted_drug_table.csv')
figure_header('Table', 'Top Predicted Compounds ({})'.format(make_clickable({{term}}.replace(' ','_')+'_cooccurence_similarity_predicted_drug_table.csv')))
display(predicted_table.head(20))
figure_legend('Table', 'Top 20 drugs predicted to be associated with {} based on DrugRIF co-occurence'.format({{term}}))
# +
# %%appyter markdown
### Predicted L1000 Co-expression Table
The top {{ set_size }} drugs from the associated drug table are ranked by the product of (Publications with Search Term) * (Publication with Search Term / Total Publications) and treated as an unweighted drug set
Predicted compounds are computed based on average cosine similarity of drug-induced L1000 gene expression signatures between the unweighted drug set and thousands of approved and experimental small molecules from [SEP-L1000](https://maayanlab.net/SEP-L1000/index.html)
# +
# Load correlation matrix into pandas DataFrame
response = requests.get('https://appyters.maayanlab.cloud/storage/DrugShot/L1000_similarity_matrix.npz')
coexpression_matrix = np.load(io.BytesIO(response.content), allow_pickle = True)
df_coexpression = pd.DataFrame(data = coexpression_matrix['correlations'], columns = coexpression_matrix['index'],
index = coexpression_matrix['index'])
df_coexpression[df_coexpression.columns] = df_coexpression[df_coexpression.columns].replace({1:np.nan})
# -
# %%appyter code_exec
# Calculate average similarity for each drug with the associated drug set
df_coexpression = df_coexpression.loc[df_coexpression.index.isin(associated_table.index[0:{{set_size}}])]
df_coexpression.loc['Score'] = df_coexpression[df_coexpression.columns].mean()
df_coexpression.sort_values(by = ['Score'], axis = 1, ascending = False, inplace = True)
# %%appyter code_exec
predicted_table = pd.DataFrame(df_coexpression.loc['Score'][0:200])
predicted_table.to_csv({{term}}.replace(' ','_')+'_coexpression_similarity_predicted_drug_table.csv')
figure_header('Table', 'Top Predicted Compounds ({})'.format(make_clickable({{term}}.replace(' ','_')+'_coexpression_similarity_predicted_drug_table.csv')))
display(predicted_table.head(20))
figure_legend('Table', 'Top 20 drugs predicted to be associated with {} based on coexpression'.format({{term}}))
# +
# %%appyter markdown
### Predicted Tanimoto Structural Similarity Table
The top {{ set_size }} drugs from the associated drug table are ranked by the product of (Publications with Search Term) * (Publication with Search Term / Total Publications) and treated as an unweighted drug set
Predicted compounds are computed based on average Tanimoto similarity between the unweighted drug set and all other drugs & small molecules included in DrugRIF
# +
# Load tanimoto similarity matrix into pandas DataFrame
response = requests.get('https://appyters.maayanlab.cloud/storage/DrugShot/Tanimoto_similarity_matrix.npz')
tanimoto_matrix = np.load(io.BytesIO(response.content), allow_pickle = True)
df_tanimoto = pd.DataFrame(data = tanimoto_matrix['correlations'], columns = tanimoto_matrix['index'],
index = tanimoto_matrix['index'])
df_tanimoto[df_tanimoto.columns] = df_tanimoto[df_tanimoto.columns].replace({1:np.nan})
# -
# %%appyter code_exec
# Calculate average similarity for each drug with the associated drug set
df_tanimoto = df_tanimoto.loc[df_tanimoto.index.isin(associated_table.index[0:{{set_size}}])]
df_tanimoto.drop(columns = associated_table.index[0:{{set_size}}].tolist(), inplace = True)
df_tanimoto.loc['Score'] = df_tanimoto[df_tanimoto.columns].mean()
df_tanimoto.sort_values(by = ['Score'], axis = 1, ascending = False, inplace = True)
# %%appyter code_exec
predicted_table = pd.DataFrame(df_tanimoto.loc['Score'][0:200])
predicted_table.to_csv({{term}}.replace(' ','_')+'_tanimoto_similarity_predicted_drug_table.csv')
figure_header('Table', 'Top Predicted Compounds ({})'.format(make_clickable({{term}}.replace(' ','_')+'_tanimoto_similarity_predicted_drug_table.csv')))
display(predicted_table.head(20))
figure_legend('Table', 'Top 20 drugs predicted to be associated with {} based on chemical structure similarity'.format({{term}}))
| appyters/DrugShot/DrugShot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example of performing linear least squares fitting
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Now, let's generate some random data about a trend line.
# +
# set a random number seed
np.random.seed(119)
# set number of data points
npoints = 50
# set x
x = np.linspace(0,10.,npoints)
# set slope, intercept, and scatter rms
m = 2.0
b = 1.0
sigma = 2.0
# generate y points
y = m*x + b + np.random.normal(scale=sigma, size=npoints)
y_err = np.full(npoints,sigma)
# -
# ### Let's just plot the data first
f = plt.figure(figsize=(7,7))
plt.errorbar(x,y,sigma,fmt='o')
plt.xlabel('x')
plt.ylabel('y')
# ### Method #1, polyfit()
# +
m_fit, b_fit = np.poly1d(np.polyfit(x,y,1, w=1./y_err)) # weight with uncertainties
print(m_fit, b_fit)
y_fit = m_fit * x + b_fit # the model, the straight line
# -
# ### Plot result
f = plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err, fmt='o', label='data')
plt.plot(x,y_fit, label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2,frameon=False)
# ### A new hope: linear regression
# +
# Closed-form least-squares slope from the normal equations:
#   m = (N*sum(x*y) - sum(x)*sum(y)) / (N*sum(x^2) - (sum(x))^2)
# (The redundant `= 0.0` pre-initializations were dead stores and are removed.)
m_A = np.sum(x*y)
m_B = np.sum(x)*np.sum(y)
m_C = np.sum(x*x)
m_D = np.sum(x)**2

m_fit_lr = ( float(npoints)*m_A - m_B )/( float(npoints)*m_C - m_D)

# Intercept from the sample means: b = <y> - m*<x>
y_mean = np.mean(y)
x_mean = np.mean(x)
b_fit_lr = y_mean - m_fit_lr*x_mean

# Model prediction on the original x grid
y_fit_lr = m_fit_lr * x + b_fit_lr
print(m_fit_lr, b_fit_lr)
# -
# ### Plot the result
f = plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err,fmt='o',label='data')
plt.plot(x,y_fit_lr,'o',label='linear reg')
plt.plot(x,y_fit,label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2, frameon=False)
| linear_regression_line_fitting2.ipynb |
# ---
# title: "Adding And Subtracting Matrices"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "How to add and subtract matrices in Python."
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Preliminaries
# Load library
import numpy as np

# ## Create Matrices

# +
# Create matrix
matrix_a = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 2]])

# Create matrix
matrix_b = np.array([[1, 3, 1], [1, 3, 1], [1, 3, 8]])
# -

# ## Add Matrices

# Add two matrices element-wise (same result as matrix_a + matrix_b)
np.add(matrix_a, matrix_b)

# ## Subtract Matrices

# Subtract two matrices element-wise (same result as matrix_a - matrix_b)
np.subtract(matrix_a, matrix_b)
| docs/machine_learning/vectors_matrices_and_arrays/adding_and_subtracting_matrices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
data_path = "/home/yanjun/projects/dataset/kagglecatsanddogs_3367a/PetImages"
categories = ["Dog", "Cat"]
# +
# Images are resized to 50x50 grayscale before training
img_size = 50
train_data = []
def create_train_data():
    """Load every image under data_path/<category>, convert to grayscale,
    resize to img_size x img_size, and append [image_array, class_index]
    to the module-level train_data list.
    """
    for category in categories:
        path = os.path.join(data_path, category)
        # Label is the category's position in `categories` (Dog=0, Cat=1)
        class_num = categories.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (img_size, img_size))
                train_data.append([new_array, class_num])
            except Exception as e:
                # Deliberate best-effort: the Kaggle cats/dogs dataset contains
                # corrupt files; skip anything cv2 cannot read or resize.
                pass
create_train_data()
# -
print(len(train_data))
# +
import random
random.shuffle(train_data)
# +
# Build feature/label arrays from the shuffled training data.
# Bug fix: the original bare expression `X,y` raised a NameError — the two
# lists must be initialized before the append loop can run.
X = []
y = []
for feature, label in train_data:
    X.append(feature)
    y.append(label)

X = np.array(X).reshape(-1, img_size, img_size, 1)  # trailing 1 = single grayscale channel
y = np.array(y)  # array form is what Keras' fit() expects for labels
# +
import pickle

# Persist the prepared arrays so preprocessing need not be re-run.
# Using `with` guarantees the file handles are flushed and closed even if
# pickling raises (the original left files open on error).
with open("X.pickle", "wb") as pickle_out:
    pickle.dump(X, pickle_out)

with open("y.pickle", "wb") as pickle_out:
    pickle.dump(y, pickle_out)

# To reload the pickled data later:
#pickle_in = open("X.pickle", "rb")
#X = pickle.load(pickle_in)
# -
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
X = pickle.load(open("X.pickle","rb"))
y = pickle.load(open("y.pickle","rb"))
X = X / 255
# +
import keras
# Small binary CNN classifier: two conv/pool stages, one dense hidden layer,
# sigmoid output for the dog-vs-cat decision.
model = keras.models.Sequential()
model.add(Conv2D(64, (3, 3), input_shape = X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten()) # Conv layers output 2D maps; flatten to 1D before the Dense layers
model.add(Dense(64))
model.add(Activation("relu"))
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=["accuracy"]) # categorical_crossentropy could be used instead for a one-hot multi-class setup
model.fit(X, y, batch_size =32, epochs=10, validation_split=0.1)
# -
| learn/.ipynb_checkpoints/cnn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install pandas matplotlib sklearn numpy
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'D')].time_taken_klee
X = df[(df.combination == 'pass9') & (df.obfuscation == 'D')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - D')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Klee')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Print the model's predicted cracking time vs. the measured time.

    file_size: binary size in bytes (the regression input).
    actual_time: measured time in seconds.
    """
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed % error: positive = over-prediction, negative = under-prediction.
    error = (predict_time - actual_time) / actual_time * 100
    # Use abs(error) so accuracy is capped at 100%; the original `100 - error`
    # reported >100% accuracy whenever the model under-predicted.
    accuracy = 100 - abs(error)
    # Also fixes the "acutal" typo in the report string.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))

stats(17591, 1.98)
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'CAD')].time_taken_klee
X = df[(df.combination == 'pass9') & (df.obfuscation == 'CAD')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - CAD')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Klee')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Print the model's predicted cracking time vs. the measured time.

    file_size: binary size in bytes (the regression input).
    actual_time: measured time in seconds.
    """
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed % error: positive = over-prediction, negative = under-prediction.
    error = (predict_time - actual_time) / actual_time * 100
    # Use abs(error) so accuracy is capped at 100%; the original `100 - error`
    # reported >100% accuracy whenever the model under-predicted.
    accuracy = 100 - abs(error)
    # Also fixes the "acutal" typo in the report string.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))

stats(165028, 3.20)
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'VACD')].time_taken_klee
X = df[(df.combination == 'pass9') & (df.obfuscation == 'VACD')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - VACD')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Klee')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Print the model's predicted cracking time vs. the measured time.

    file_size: binary size in bytes (the regression input).
    actual_time: measured time in seconds.
    """
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed % error: positive = over-prediction, negative = under-prediction.
    error = (predict_time - actual_time) / actual_time * 100
    # Use abs(error) so accuracy is capped at 100%; the original `100 - error`
    # reported >100% accuracy whenever the model under-predicted.
    accuracy = 100 - abs(error)
    # Also fixes the "acutal" typo in the report string.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))

stats(4242392, 11.50)
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'D')].time_taken_angr
X = df[(df.combination == 'pass9') & (df.obfuscation == 'D')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - D')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Angr')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Print the model's predicted cracking time vs. the measured time.

    file_size: binary size in bytes (the regression input).
    actual_time: measured time in seconds.
    """
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed % error: positive = over-prediction, negative = under-prediction.
    error = (predict_time - actual_time) / actual_time * 100
    # Use abs(error) so accuracy is capped at 100%; the original `100 - error`
    # reported >100% accuracy whenever the model under-predicted.
    accuracy = 100 - abs(error)
    # Also fixes the "acutal" typo in the report string.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))

stats(17591, 2.583754)
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'CAD')].time_taken_angr
X = df[(df.combination == 'pass9') & (df.obfuscation == 'CAD')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - CAD')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Angr')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Print the model's predicted cracking time vs. the measured time.

    file_size: binary size in bytes (the regression input).
    actual_time: measured time in seconds.
    """
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed % error: positive = over-prediction, negative = under-prediction.
    error = (predict_time - actual_time) / actual_time * 100
    # Use abs(error) so accuracy is capped at 100%; the original `100 - error`
    # reported >100% accuracy whenever the model under-predicted.
    accuracy = 100 - abs(error)
    # Also fixes the "acutal" typo in the report string.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))

stats(185834, 13.719730)
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, metrics
df = pd.read_csv('pass.csv', header=0, dtype={
'file_name': str,
'file_size': int,
'run_time': float,
'time_taken_angr': float,
'time_taken_klee': float,
'passwords': str,
'is_password_cracked_by_angr': bool,
'password_generated_by_angr': str,
'is_password_cracked_by_klee': bool,
'password_generated_by_klee': str,
'file_path': str
})
df[['_' ,'_' , '_', '_', '_', '_', '_', '_', 'combination', 'obfuscation', '_']] = df.file_path.str.split('/', expand=True)
df.combination = df.combination.astype('category')
df.obfuscation = df.obfuscation.astype('category')
# Load CSV and columns
Y = df[(df.combination == 'pass9') & (df.obfuscation == 'VACD')].time_taken_angr
X = df[(df.combination == 'pass9') & (df.obfuscation == 'VACD')].file_size
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
sample_size = 99
# Split the data into training/testing sets
X_train = X[:-sample_size]
X_test = X[-sample_size:]
# Split the targets into training/testing sets
Y_train = Y[:-sample_size]
Y_test = Y[-sample_size:]
# Plot outputs
plt.scatter(X_test, Y_test, color='#f39c12')
plt.title('Obfuscation executive - VACD')
plt.xlabel('File size')
plt.ylabel('Time taken to crack by Angr')
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='#c0392b',linewidth=2)
#plt.show()
def stats(file_size, actual_time):
    """Report how well the fitted module-level model ``regr`` predicts the
    cracking time for a binary of ``file_size`` bytes.

    Parameters:
        file_size: size of the binary in bytes (scalar).
        actual_time: measured cracking time in seconds; must be non-zero
            because the relative error divides by it.

    Side effects: prints a one-line summary. Returns None.
    """
    # regr.predict expects a 2-D array of samples; [0][0] unwraps the 1x1 result.
    predict_time = regr.predict([[file_size]])[0][0]
    # Signed relative error in percent (positive = over-prediction).
    error = (predict_time - actual_time) / actual_time * 100
    accuracy = 100 - error
    # Bug fix: "acutal" -> "actual" in the printed message.
    print('file size = {}, actual time = {:.2f}, predict time = {:.2f}, accuracy = {:.2f}%, error = {:.2f}%'.format(
        file_size, actual_time, predict_time, accuracy, error
    ))
stats(4242392, 275.158915)
# -
| linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bundickm/Study-Guides/blob/master/Unit_1_Sprint_1_Data_Wrangling_and_Visualizations_Study_Guide.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="LC39gYz8ZVKF" colab_type="text"
# This study guide should reinforce and provide practice for all of the concepts you have seen in the past week. There are a mix of written questions and coding exercises, both are equally important to prepare you for the sprint challenge as well as to be able to speak on these topics comfortably in interviews and on the job.
#
# If you get stuck or are unsure of something remember the 20 minute rule. If that doesn't help, then research a solution with google and stackoverflow. Only once you have exhausted these methods should you turn to your Team Lead - they won't be there on your SC or during an interview. That being said, don't hesitate to ask for help if you truly are stuck.
#
# Have fun studying!
# + [markdown] id="DI3Ev_xu1KRN" colab_type="text"
# # Resources
# [Pandas Documentation](https://pandas.pydata.org/pandas-docs/stable/reference/index.html)
#
# [Matplotlib Documentation](https://matplotlib.org/3.1.1/api/pyplot_summary.html)
#
# [Pandas EDA Cheat Sheet](https://github.com/bundickm/CheatSheets/blob/master/Data_Cleaning_and_Exploring_Cheat_Sheet.ipynb)
#
# [Pandas Data Wrangling Cheat Sheet](https://github.com/bundickm/CheatSheets/blob/master/Data_wrangling_cheatsheet.pdf)
#
# [Matplotlib Cheat Sheet](https://github.com/bundickm/CheatSheets/blob/master/MatplotLib_Cheat_Sheet.ipynb)
# + [markdown] id="-_4LgTTcDz-5" colab_type="text"
# # Setup
# + [markdown] id="xKAlguQSD7kl" colab_type="text"
# Import and alias the following libraries:
# - numpy
# - pandas
# - matplot
# - seaborn
# + id="_HUoZ4slCjSf" colab_type="code" colab={}
# + [markdown] id="9s_RXnzmEZjn" colab_type="text"
# Read in the auto data csv below and verify it's contents
# + id="qyfFozmfFMln" colab_type="code" colab={}
auto_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
# + id="VeKPIs40G98C" colab_type="code" colab={}
# + id="cYVrL8j1G-Jv" colab_type="code" colab={}
# + [markdown] id="t27UxbeWFxIo" colab_type="text"
# You should see there are no column headers. Read in the csv with the column names below and then check the first 5 and last 5 rows of the data frame
# + id="Tw59O017GDGu" colab_type="code" colab={}
columns = ['symboling','norm_loss','make','fuel','aspiration','doors',
'bod_style','drv_wheels','eng_loc','wheel_base','length','width',
'height','curb_weight','engine','cylinders','engine_size',
'fuel_system','bore','stroke','compression','hp','peak_rpm',
'city_mpg','hgwy_mpg','price']
# + id="V7m4Bl3UHqHt" colab_type="code" colab={}
# + id="wcQSNUISH1-Q" colab_type="code" colab={}
# + id="urjF32mMKAMr" colab_type="code" colab={}
# + [markdown] id="cCvbuEmUKC5-" colab_type="text"
# Check for nulls and any values that may be a placeholder for nulls
# + id="_m9hcNTzIhEs" colab_type="code" colab={}
# + id="ddeg6fHjIweq" colab_type="code" colab={}
# + [markdown] id="r3Rl0YUlKakm" colab_type="text"
# Replace the placeholders with `NaN` and then recheck for nulls.
# + id="XhiHSkWLKrgA" colab_type="code" colab={}
# + id="f53Ro229Krpz" colab_type="code" colab={}
# + [markdown] id="XCLv0Q5OK8RW" colab_type="text"
# # Exploration
# + [markdown] id="d7pDkXRVLAd5" colab_type="text"
# Find the following information about the dataframe:
# - Get the descriptive statistics for ALL features (numeric and categorical)
# - Get the data types for all features
# - What are the unique values for the `doors` column
# - How many rows and columns are in the dataframe?
# + id="Wg5IAWTsMJba" colab_type="code" colab={}
# + id="MPFetG3xNfbq" colab_type="code" colab={}
# + id="ME2pU16FNfsP" colab_type="code" colab={}
# + id="6iLxuMJDMKRg" colab_type="code" colab={}
# + [markdown] id="JZQFmkMcNjnB" colab_type="text"
# # Cleaning and Feature Engineering
# + [markdown] id="ZZewkUAiNqGs" colab_type="text"
# Complete the following actions on the dataframe:
# - Fill in the null values in the dataframe without dropping any rows or columns
# - Convert the `doors` column to `int`
# - Drop the `eng_loc` column
# - Create a new column in the df that is the `hp` divided by `price`
# + id="xI9SyY_IQqlY" colab_type="code" colab={}
# + id="EQC6rnz1QqgJ" colab_type="code" colab={}
# + id="j4-o-AhAQqbb" colab_type="code" colab={}
# + id="HX0uCs-KQqVC" colab_type="code" colab={}
# + [markdown] id="gpxXIPjf7ilK" colab_type="text"
# What does it mean to engineer a feature?
# + id="Y84U2ltU79ud" colab_type="code" colab={}
# + [markdown] id="ufQSY3l47-FU" colab_type="text"
# What is a `NaN` and why do we care about them?
# + id="MJq9qTiI8Vqp" colab_type="code" colab={}
# + [markdown] id="YJP6bMIJQsBF" colab_type="text"
# # Basic Visualizations
# + [markdown] id="fp_7Jn62QzoJ" colab_type="text"
# Make the following basic graphs with the features in the dataframe:
# - Line Plot
# - Scatterplot
# - Histogram
# - Bar Graph
# - Density
#
# All graphs should have a title, and label for each axis
# + id="GwL2EUjJ0yBv" colab_type="code" colab={}
# + id="APzaEbStRZpL" colab_type="code" colab={}
# + id="twLoG5MDRZ3c" colab_type="code" colab={}
# + id="zt2BnVO5RaWc" colab_type="code" colab={}
# + id="X-oapWkjRZyu" colab_type="code" colab={}
# + [markdown] id="8-UbABdR8hPY" colab_type="text"
# Name a graph you would use for the following cases:
# - Distribution
# - Change Over Time
# - Correlation
#
# You may find the answers in the Resources section above or through Google
# + id="Xm8X7yTt9bBS" colab_type="code" colab={}
# + id="b-LFI6Gm9bUy" colab_type="code" colab={}
# + id="K2QOfHUb9bkA" colab_type="code" colab={}
# + [markdown] id="0xXYDhpdRhQt" colab_type="text"
# # Crosstabs and Pivot Tables
# + [markdown] id="jz14-S2QR3DH" colab_type="text"
# Complete the following:
# - Make a crosstab of two of the categorical features
# - In a sentence or two, explain how to read the table and what the values mean
# - Make a graph from the crosstab
# - Make a crosstab with a categorical feature and `price`
# - Use `pd.cut` to create 5 bins for `price`
# - In a sentence or two, explain how to read the table and what the values mean
# - Make a graph from the crosstab
# - Create a [pivot table](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html)
# - In a sentence or two, explain how to read the table and what the values mean
# + id="_qC5u0i1UNwl" colab_type="code" colab={}
# + id="dGdArU8NUOEH" colab_type="code" colab={}
# + id="NhAXm5hXUOPI" colab_type="code" colab={}
# + id="sWUdFVslUOdY" colab_type="code" colab={}
# + id="H4HgSxXTUOxH" colab_type="code" colab={}
# + id="f5Me4YYzUNrc" colab_type="code" colab={}
# + id="CLMcSPdcUNj7" colab_type="code" colab={}
# + [markdown] id="vmyJ0VOSUpo-" colab_type="text"
# # Interpretations
# + [markdown] id="l4KrOj8JUrzj" colab_type="text"
# Write a short paragraph explaining some of the things you learned from the dataset. Avoid low hanging fruit like simply writing about the different descriptive stats. Focus on interesting relationships between columns, what you see in the graphs, etc.
# + id="1PqyuoWgVTpi" colab_type="code" colab={}
# + [markdown] id="4nDwljBQVixD" colab_type="text"
# # Tidy and Join
# + [markdown] id="_k1wR__VbIMo" colab_type="text"
# Combine `df_a`, `df_b`, and `df_c` joining on the ids in `df_a` to reveal the real names of The Hulk, Spiderman, and Superman.
# + id="HMwpqGfIZzt0" colab_type="code" outputId="5da041cd-2e4d-4200-a2de-05c39a0c4a14" colab={"base_uri": "https://localhost:8080/", "height": 143}
df_a = pd.DataFrame({'id_b':[1,2,3], 'id_c': [4,5,6]})
df_a
# + id="6eKvwxn5ajUZ" colab_type="code" outputId="24435f25-72f2-4ef7-8aa2-26502baa99c9" colab={"base_uri": "https://localhost:8080/", "height": 143}
df_b = pd.DataFrame({'id':[1,2,3], 'first_name': ['Bruce', 'Peter', 'Clark']})
df_b
# + id="pBIZJK7tamdO" colab_type="code" outputId="e05c115f-fbb2-410f-ed52-12b19f055ba6" colab={"base_uri": "https://localhost:8080/", "height": 143}
df_c = pd.DataFrame({'id': [4,5,6], 'last_name': ['Banner', 'Parker', 'Kent']})
df_c
# + id="ZAWU3yo1cigI" colab_type="code" colab={}
# + id="COhFWYjjciYD" colab_type="code" colab={}
# + [markdown] id="-3S5czwpZHY8" colab_type="text"
# Use `melt` to change the dataframe below to look like the image below.
# + id="3MACmvwfWQem" colab_type="code" outputId="baf8e023-8385-40d9-b5e3-683c7de6fe42" colab={"base_uri": "https://localhost:8080/", "height": 143}
messy = pd.DataFrame({'a': ['A', 'B', 'C'],
'b': [1, 2, 3],
'c': [4, 5, 6],
'd': [7, 8, 9]})
messy
# + [markdown] id="vxtdnU8dZjJd" colab_type="text"
# Make this:
#
# 
# + id="x7oidujvX08l" colab_type="code" colab={}
# + [markdown] id="dWhz2iOf4VeU" colab_type="text"
# In a sentence or two, explain what the tidy data format is.
# + id="VNGF_dye4tTc" colab_type="code" colab={}
# + [markdown] id="Rd6EpmRp9siX" colab_type="text"
# What are the types of joins?
# + id="0SjBr2579xMt" colab_type="code" colab={}
# + [markdown] id="pW5PDZLUckpC" colab_type="text"
# # Snazzy Graphs
# + [markdown] id="7dLMpelIGGNV" colab_type="text"
# Pick a graph from FiveThirtyEight and try to recreate it. Focus on the following items:
# - All text in the original graph (except the watermark) is present in your graph and in the same locations
# - All text in your graph is roughly the same size and look of the original graph
# - Gridlines, Ticklabels, and tickmark spacing matches
# - Coloring and/or style matches.
# + id="GAjE_xuDBxyM" colab_type="code" colab={}
| Unit_1_Sprint_1_Data_Wrangling_and_Visualizations_Study_Guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
url="https://raw.githubusercontent.com/afcarl/ebola-imc-public/master/data/kenema/test/pres-kgh/imputation-50.csv"
df=pd.read_csv(url,sep=",")
df.head()
df.columns
# Planteamos el problema de regresión logística como sigue:
#
# $$\mu_i=\sigma(\beta^T x_i)$$
#
# $$\sigma(z)=\frac{1}{1+e{-z}}$$
#
#
# +
def sigmoide(z):
    """Return the element-wise logistic sigmoid 1 / (1 + exp(-z))."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
def calc_mu(X, beta):
    """Predicted probabilities mu_i = sigmoid(beta^T x_i), one per row of X."""
    linear_term = np.matmul(beta, np.transpose(X))
    return sigmoide(linear_term)
def f(X, y, beta):
    """Negative log-likelihood of the 0/1 labels ``y`` under the logistic
    model with coefficients ``beta``."""
    mu = calc_mu(X, beta)
    log_likelihood_terms = y * np.log(mu) + (1 - y) * (np.log(1 - mu))
    return -sum(log_likelihood_terms)
# -
# Usamos la pérdida como la *log verosimilitud negativa*
#
# $$F(\beta)=- \sum_{i=1}^{m}[y_i log\mu_i + (1-y_i)log(1-\mu_i)]$$
#
# y el gradiente está dado por:
#
# $$\nabla F(\beta)= X^T(\mu-Y)$$
#
# y la Hessiana:
#
# $$\nabla^2 F(\beta)= X^TSX$$
# +
def gradiente_f(X, y, beta):
    """Gradient of the negative log-likelihood: X^T (mu - y)."""
    residual = calc_mu(X, beta) - y
    return np.matmul(np.transpose(X), residual)
def hessiana_f(X, y, beta):
    """Hessian of the negative log-likelihood: X^T S X, S = diag(mu (1 - mu))."""
    mu = calc_mu(X, beta)
    weights = np.diag(mu * (1 - mu))
    return np.matmul(np.transpose(X), np.matmul(weights, X))
# -
def normalize(x):
    """Scale vector ``x`` to unit Euclidean norm."""
    norm = np.sqrt(sum(x * x))
    return x / norm
def clasifica(X, beta_hat):
    """Hard 0/1 class labels: 1.0 where the predicted probability is >= 0.5."""
    mu = calc_mu(X, beta_hat)
    labels = np.where(mu >= .5, 1., 0.)
    return labels
def gradient_descent(X,y,lr=.1,tol=10**(-6),max_iter=10**5,method="max"):
    '''
    Return the (p,) coefficient vector ``beta`` that approximately minimises
    the negative log-likelihood ``f``.

    Parameters:
        X: (n, p) design matrix.
        y: (n,) vector of 0/1 labels.
        lr: step size applied to the (normalised) descent direction.
        tol: stop once the objective changes by less than this between steps.
        max_iter: hard cap on the number of iterations.
        method: "max" (steepest descent) or "newton"; dispatched through
            ``descent_direction``, which is defined in a later cell — that
            cell must have run before this function is called.
    '''
    iteraciones=0
    # random start in [0, 1)^p
    beta=np.random.uniform(0,1,X.shape[1])
    # take one step up-front so the stopping test can compare two points
    pk = descent_direction(X,y,beta,method)
    beta_new= beta - lr*pk
    # stop when the objective plateaus or the iteration budget runs out
    while ((abs(f(X,y,beta) - f(X,y,beta_new))>tol) & (iteraciones<max_iter)):
        iteraciones+=1  # loop counter
        beta = beta_new
        pk = descent_direction(X,y,beta,method)
        beta_new = beta - lr*pk
    print("iteraciones=",iteraciones)
    return beta_new
# +
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# First column is the binary outcome, the remaining columns are predictors.
data = df.to_numpy()
y = data[:, 0]
X = data[:, 1:]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=.2)
# Scale features to [0, 1]. Fit the scaler on the training split only and
# reuse its parameters on the test split. Bug fix: the original called
# fit_transform on the test set too, which rescaled it with its own min/max,
# so train and test ended up on different scales (data leakage).
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# +
# NOTE(review): descent_direction is defined in a later cell; that cell must
# run first or this call raises NameError.
beta_hat=gradient_descent(x_train,y_train)
yhat=clasifica(x_test,beta_hat)
print("beta_hat=", beta_hat)
print("Error de clasificacion=",round(100*sum(abs(y_test-yhat))/len(yhat),2),"%")
# -
def descent_direction(X, y, beta, method="max"):
    """Unit-length direction ``pk`` for the update ``beta - lr * pk``.

    "max": the raw gradient (steepest descent).
    "newton": H^{-1} grad, Newton's direction.
    """
    if method == "max":
        direction = gradiente_f(X, y, beta)
    elif method == "newton":
        gradient = gradiente_f(X, y, beta)
        hessian_inverse = np.linalg.inv(hessiana_f(X, y, beta))
        direction = np.matmul(hessian_inverse, gradient)
    return normalize(direction)
# +
beta_hat=gradient_descent(x_train,y_train, method="newton")
yhat=clasifica(x_test,beta_hat)
print("beta_hat=", beta_hat)
print("Error de clasificacion=",round(100*sum(abs(y_test-yhat))/len(yhat),2),"%")
# -
| notebooks/.ipynb_checkpoints/script-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Preview the sheet, then load it for real.
pd.read_excel("AAPL.xlsx", sheet_name="sheet1")
dataset = pd.read_excel("AAPL.xlsx", sheet_name="sheet1")
# First 200 rows train, the rest test (no shuffling).
dataset[:200]
dataset[200:]
train_set = dataset[:200]
test_set = dataset[200:]
# Target column "y", features "x1".."x3".
train_y = train_set["y"]
train_y
train_x = train_set[["x1", "x2", "x3"]]
train_x
test_y = test_set["y"]
test_y
test_x = test_set[["x1", "x2", "x3"]]
test_x
from sklearn import linear_model
reg = linear_model.LinearRegression()
# +
# Fit ordinary least squares and inspect the coefficients.
reg.fit (train_x, train_y)
reg.coef_
# -
reg.predict(test_x)
| AAPL_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''3.8.12'': pyenv)'
# name: python3
# ---
# + language="bash"
# # Airflow needs a home. `~/airflow` is the default, but you can put it
# # somewhere else if you prefer (optional)
# export AIRFLOW_HOME=~/airflow
#
# # Install Airflow using the constraints file
# AIRFLOW_VERSION=2.2.2
# PYTHON_VERSION="$(python --version | cut -d " " -f 2 | cut -d "." -f 1-2)"
# # For example: 3.6
# CONSTRAINT_URL="https://raw.githubusercontent.com/apache/airflow/constraints-${AIRFLOW_VERSION}/constraints-${PYTHON_VERSION}.txt"
# # For example: https://raw.githubusercontent.com/apache/airflow/constraints-2.2.2/constraints-3.6.txt
# # echo "apache-airflow==${AIRFLOW_VERSION}" --constraint "${CONSTRAINT_URL}"
# pip install --upgrade pip
# pip install "apache-airflow==${AIRFLOW_VERSION}" --constraint "${CONSTRAINT_URL}" --user
#
# # # The Standalone command will initialise the database, make a user,
# # # and start all components for you.
# # airflow standalone
#
# # Visit localhost:8080 in the browser and use the admin account details
# # shown on the terminal to login.
# # Enable the example_bash_operator dag in the home page
# + language="bash"
# env PGPASSWORD=<PASSWORD>ise psql -U corise -d dbt -c 'CREATE ROLE reporting;'
# env PGPASSWORD=<PASSWORD>ise psql -U corise -d dbt -c 'CREATE SCHEMA dbt_ramnath_v'
# dbt compile --project-dir=/workspace/dbt-explore/dbt-greenery
# +
# %%writefile ~/airflow/dags/dbt-greenery.py
import logging
from copy import copy
from logging import Logger
from typing import Dict, List, Optional
from airflow_dbt.operators.dbt_operator import (
DbtSeedOperator,
DbtSnapshotOperator,
DbtRunOperator,
DbtTestOperator,
)
from airflow import DAG
from airflow.models import Variable, BaseOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup
DbtRunOperator.ui_color = '#f5f5dc'
logger = logging.getLogger(__name__)
class DbtNode:
    """One dbt manifest node plus the flags the task generator needs."""

    def __init__(self, full_name: str, children: List[str], config: Optional[dict]):
        self.full_name = full_name
        self.children = children
        # Manifest ids look like "model.<package>.<name>".
        self.is_model = full_name.startswith('model')
        self.name = full_name.rsplit('.', 1)[-1]
        # config is only consulted for models, so it may be None for other node
        # kinds (the `and` short-circuits before config is indexed).
        self.is_persisted = self.is_model and config["materialized"] in ('table', 'incremental', 'view')
class DbtTaskGenerator:
    """Builds one Airflow task per persisted dbt model and wires up the
    dependencies between them, based on the manifest's ``child_map``."""

    def __init__(
        self, dag: DAG, manifest: dict
    ) -> None:
        self.dag: DAG = dag
        self.manifest = manifest
        # full_name -> DbtNode, restricted to nodes that materialize a relation
        self.persisted_node_map: Dict[str, DbtNode] = self._get_persisted_parent_to_child_map()
        self.logger: Logger = logging.getLogger(__name__)

    def _get_persisted_parent_to_child_map(self) -> Dict[str, DbtNode]:
        """Map every persisted node to children that are themselves persisted,
        skipping over non-persisted intermediates."""
        node_info = self.manifest['nodes']
        parent_to_child_map = self.manifest['child_map']
        # First pass: wrap every entry of child_map. config is None for ids
        # missing from manifest['nodes'] (e.g. sources); DbtNode only reads
        # config for model nodes.
        all_nodes: Dict[str, DbtNode] = {
            node_name: DbtNode(
                full_name=node_name,
                children=children,
                config=node_info.get(node_name, {}).get('config')
            )
            for node_name, children in parent_to_child_map.items()
        }
        # Second pass: keep only persisted nodes, re-pointing their children
        # through any non-persisted nodes to the nearest persisted descendants.
        persisted_nodes = {
            node.full_name: DbtNode(
                full_name=node.full_name,
                children=self._get_persisted_children(node, all_nodes),
                config=node_info.get(node_name, {}).get('config')
            )
            for node_name, node in all_nodes.items()
            if node.is_persisted and node.full_name
        }
        return persisted_nodes

    @classmethod
    def _get_persisted_children(cls, node: DbtNode, all_nodes: Dict[str, DbtNode]) -> List[str]:
        """Recursively replace non-persisted children with their own persisted
        descendants, so the DAG never references a skipped node."""
        persisted_children = []
        for child_key in node.children:
            child_node = all_nodes[child_key]
            if child_node.is_persisted:
                persisted_children.append(child_key)
            else:
                persisted_children += cls._get_persisted_children(child_node, all_nodes)
        return persisted_children

    def add_all_tasks(self) -> None:
        """Create a task for every persisted model and link parents to children."""
        nodes_to_add: Dict[str, DbtNode] = {}
        for node in self.persisted_node_map:
            # Shallow-copy each node so its children list can be rebuilt
            # without mutating persisted_node_map.
            included_node = copy(self.persisted_node_map[node])
            included_children = []
            for child in self.persisted_node_map[node].children:
                included_children.append(child)
            included_node.children = included_children
            nodes_to_add[node] = included_node
        self._add_tasks(nodes_to_add)

    def _add_tasks(self, nodes_to_add: Dict[str, DbtNode]) -> None:
        """Create the operators, then set ordering for every model's children."""
        dbt_model_tasks = self._create_dbt_run_model_tasks(nodes_to_add)
        self.logger.info(f'{len(dbt_model_tasks)} tasks created for models')
        for parent_node in nodes_to_add.values():
            if parent_node.is_model:
                self._add_model_dependencies(dbt_model_tasks, parent_node)

    def _create_dbt_run_model_tasks(self, nodes_to_add: Dict[str, DbtNode]) -> Dict[str, BaseOperator]:
        """One DbtRunOperator per model node, keyed by the node's full name."""
        # dbt_docker_image_details = Variable.get("docker_dbt-data-platform", deserialize_json=True)
        dbt_model_tasks: Dict[str, BaseOperator] = {
            node.full_name: self._create_dbt_run_task(node.name)
            for node in nodes_to_add.values()
            if node.is_model
        }
        return dbt_model_tasks

    def _create_dbt_run_task(self, model_name: str) -> BaseOperator:
        # This is where you create a task to run the model - see
        # https://docs.getdbt.com/docs/running-a-dbt-project/running-dbt-in-production#using-airflow
        # We pass the run date into our models: f'dbt run --models={model_name} --vars '{"run_date":""}'
        # return DummyOperator(dag=self.dag, task_id=model_name)
        return DbtRunOperator(
            dag=self.dag,
            task_id=model_name,
            dir='/workspace/dbt-explore/dbt-greenery',  # NOTE(review): hard-coded project dir
            models=model_name,
            verbose=True
        )

    @staticmethod
    def _add_model_dependencies(dbt_model_tasks: Dict[str, BaseOperator], parent_node: DbtNode) -> None:
        """Set Airflow ordering: parent task >> each child task that exists."""
        for child_key in parent_node.children:
            child = dbt_model_tasks.get(child_key)
            if child:
                dbt_model_tasks[parent_node.full_name] >> child
from datetime import datetime
from airflow import DAG
import json
import os
# The compiled dbt manifest must sit next to this DAG file.
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
with open(f"{CUR_DIR}/manifest.json", "r") as file:
    manifest = json.load(file)
# Run daily at 01:00.
dag = DAG(
    dag_id="dbt_connected_task_creator_test_dag",
    start_date=datetime(2021, 12, 6),
    schedule_interval="0 1 * * *",
)
# Build one task per persisted model and wire up the dependencies.
dbt_task_generator = DbtTaskGenerator(dag, manifest)
dbt_task_generator.add_all_tasks()
# -
# !cp dags/foobar.py ~/airflow/dags
# +
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List
import json
class DbtNode:
    """Wrapper for one manifest node: lineage plus materialization flags."""

    def __init__(self, node_name: str, node_info: Dict, node_children: List):
        self.node_name = node_name
        self.node_info = node_info
        self.children = node_children
        # Upstream dependencies come straight from the manifest entry.
        self.parents = node_info.get('depends_on', {}).get('nodes', [])
        self.materialized = node_info.get('config', {}).get('materialized', '')
        self.is_model = node_name.startswith('model')
        # Only models that materialize a relation count as persisted.
        persisted_kinds = ['table', 'incremental', 'view']
        self.is_persisted = self.is_model and self.materialized in persisted_kinds

    def __repr__(self):
        return f'<DbtNode> {self.node_name} ({self.materialized})'
@dataclass
class DbtProject:
    """Load a compiled dbt project's manifest and wrap its nodes.

    ``dbt_project_dir`` must contain ``target/manifest.json`` (i.e. the
    project has been compiled). The None default would fail in
    ``__post_init__`` — always pass a real path.
    """
    dbt_project_dir: str = None

    def __post_init__(self):
        self.manifest = self.__load_manifest()
        self.nodes = self.__load_nodes()

    def __load_manifest(self):
        # Parse target/manifest.json from the project directory.
        manifest_path = Path(self.dbt_project_dir) / 'target/manifest.json'
        return json.loads(manifest_path.read_text())

    def __load_nodes(self):
        # Wrap every manifest node in a DbtNode, attaching its children
        # from child_map (empty list when the node has none).
        child_map = self.manifest['child_map']
        return [
            DbtNode(
                node_name,
                node_info,
                child_map.get(node_name, [])
            )
            for node_name, node_info in self.manifest.get('nodes', {}).items()
        ]
dbt = DbtProject('/workspace/dbt-explore/dbt-greenery')
| dbt-airflow/airflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="2fYMUBtthNkq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="456c177b-43d3-4ab5-c42d-62d6cb53d0c0"
# !pip install transformers
# + id="FRkkVm1eXdNr" colab_type="code" colab={}
import transformers
# + id="27x77PXhWuHV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="2489309f-72c6-4083-bf4b-43b996348925"
batch_size = 10
model_path = 'gpt2_epoch5.bin'
max_seq_len = 300
epochs = 5
data_path = '/content/eda-data.csv'
tokenizer = transformers.GPT2Tokenizer.from_pretrained('gpt2')
model = transformers.GPT2LMHeadModel.from_pretrained('gpt2')
# + id="faXOwfpzhKQz" colab_type="code" colab={}
import numpy as np
import re
def remove_source(text):
    """Strip a trailing attribution such as "(Source: ...)" or
    "[Written by ...]" from a synopsis.

    Everything from the marker onward is dropped; text without a marker is
    returned unchanged.
    """
    if '(Source' in text:
        return text.partition('(Source')[0]
    if '[Written ' in text:
        return text.partition('[Written')[0]
    return text
def clean_synopsis(data):
    """Filter and normalise the ``synopsis`` column of the anime dataframe.

    Drops Hentai/Kids titles, texts outside 31-300 words, source
    attributions, non-ASCII characters, stray symbols, and adaptation-style
    entries. Returns a re-indexed pandas Series of cleaned strings.
    """
    # removing hentai and kids tags
    data = data[(data.Hentai != 1) & (data.Kids != 1)]
    synopsis = data.synopsis
    synopsis = synopsis.apply(lambda x: str(x))
    # keep synopses between 31 and 300 words; everything else is flagged -1
    synopsis = synopsis.apply(lambda x: x if ((len(str(x).strip().split())<=300) and len(str(x).strip().split())>30 ) else -1)
    synopsis = synopsis[synopsis!=-1]
    # removing source text
    synopsis = synopsis.apply(lambda x: remove_source(x))
    # removing japanese characters (any non-ASCII run becomes a single space)
    synopsis = synopsis.apply(lambda x: re.sub("([^\x00-\x7F])+"," ",x))
    # remove symbols
    rx = re.compile('[&#/@`)(;<=\'"$%>]')
    synopsis = synopsis.apply(lambda x: rx.sub('',x))
    # NOTE(review): these four replaces are redundant with rx above; kept as-is
    synopsis = synopsis.apply(lambda x: x.replace('>',""))
    synopsis = synopsis.apply(lambda x: x.replace('`',""))
    synopsis = synopsis.apply(lambda x: x.replace(')',""))
    synopsis = synopsis.apply(lambda x: x.replace('(',""))
    # removing adaptation animes (some relevant might get deleted but there aren`t a lot so we wont be affected as much)
    synopsis = synopsis[synopsis.apply(lambda x: 'adaptation' not in str(x).lower())]
    synopsis = synopsis[synopsis.apply(lambda x: 'music video' not in str(x).lower())]
    synopsis = synopsis[synopsis.apply(lambda x: 'based on' not in str(x).lower())]
    return synopsis.reset_index(drop=True)
class AverageMeter:
    """Track the latest value, running total and running mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# + id="EZ6sCWMhhZvn" colab_type="code" colab={}
import torch
class AnimeDataset():
    """Torch-style dataset of cleaned synopses, tokenised for GPT-2.

    Relies on module-level globals ``tokenizer`` and ``max_seq_len``.
    """
    def __init__(self,data):
        self.eos_tok = '<|endoftext|>'
        synopsis = clean_synopsis(data)
        # every sample ends with EOS so generation knows where to stop
        synopsis = synopsis.apply(lambda x: str(x) + self.eos_tok)
        self.synopsis = synopsis.tolist()
        # NOTE(review): encode() is given a list here but a string elsewhere —
        # presumably both yield the pad token id(s); confirm with the tokenizer.
        self.pad_tok = tokenizer.encode(['<|pad|>'])
    def __getitem__(self,item):
        # Return token ids and attention mask, padded/truncated to max_seq_len.
        synopsis = self.synopsis[item]
        tokens = tokenizer.encode(synopsis)
        mask = [1]*len(tokens)
        max_len = max_seq_len
        if max_len>len(tokens):
            # pad on the right; padding positions get mask 0
            padding_len = max_len - len(tokens)
            tokens = tokens + self.pad_tok*padding_len
            mask = mask + [0]*padding_len
        else:
            # truncate, making sure the final token is still EOS
            tokens = tokens[:max_len]
            mask = mask[:max_len]
            if tokens[-1]!= tokenizer.encode(self.eos_tok)[0]:
                tokens[-1] = tokenizer.encode(self.eos_tok)[0]
        return {'ids':torch.tensor(tokens,dtype = torch.long),
                'mask': torch.tensor(mask,dtype = torch.long),
                'og_synpsis':synopsis}
    def __len__(self):
        return len(self.synopsis)
# + id="6mHk-qWfhiRC" colab_type="code" colab={}
from tqdm import tqdm
import torch
import numpy as np
def train_fn(model,dataloader,optimizer,scheduler,device):
    """Run one training epoch and return the mean batch loss.

    Language-model objective: the input ids double as the labels.
    """
    model.train()
    tk0 = tqdm(dataloader, total = len(dataloader), leave = True, position = 0)
    train_loss = AverageMeter()
    losses = []
    for bi,d in enumerate(tk0):
        ids = d['ids'].to(device,dtype = torch.long)
        mask = d['mask'].to(device,dtype = torch.long)
        # HuggingFace models return (loss, logits, ...) when labels are given
        loss,out = model(input_ids = ids, labels = ids, attention_mask = mask)[:2]
        train_loss.update(loss.item())
        loss.backward()
        losses.append(loss.item())
        optimizer.step()
        scheduler.step()
        model.zero_grad()
        # show the running average loss on the progress bar
        tk0.set_postfix(loss = train_loss.avg)
    return np.mean(losses)
# + id="QFPY6EcMhru4" colab_type="code" colab={}
import pandas as pd
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
import torch
def run():
    """Fine-tune the GPT-2 model on the synopsis CSV, checkpointing the best epoch.

    Reads module-level globals: data_path, batch_size, epochs, model,
    model_path. Side effects: trains on CUDA and saves the best state dict
    to ``model_path``. Returns None.
    """
    data = pd.read_csv(data_path)
    dataset = AnimeDataset(data = data)
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=True)
    device = 'cuda'
    model.to(device)
    optimizer = AdamW(model.parameters(),lr = 0.0001,weight_decay = 0.003)
    scheduler = get_linear_schedule_with_warmup(optimizer,
            num_warmup_steps=10,num_training_steps = int(len(data)/batch_size * epochs))
    # Robustness fix: use +inf instead of the magic number 111111 so the first
    # epoch is always checkpointed even if its loss is very large.
    best_loss = float('inf')
    for epoch in range(epochs):
        loss = train_fn(model,dataloader,optimizer,scheduler,device)
        # keep only the checkpoint with the lowest mean training loss
        if loss<best_loss:
            best_loss = loss
            torch.save(model.state_dict(),model_path)
    # Bug fix: empty_cache was referenced without parentheses, so it never ran.
    torch.cuda.empty_cache()
# + id="eZCtiYapVdpP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="02f8ef82-d2a3-42c5-d75e-66a38c38417d"
run()
# + id="SN0LiSFFKtiM" colab_type="code" colab={}
def generate_text(input_text,device = 'cuda',max_len = 300):
    """Load the fine-tuned checkpoint and print a sampled continuation of
    ``input_text``.

    Reads module-level globals: tokenizer, model, model_path, max_seq_len.
    """
    pad_tok = tokenizer.encode(['<|pad|>'])[0]
    model.load_state_dict(torch.load(model_path))
    model.to(device)
    model.eval()
    input_ids = tokenizer.encode(input_text)
    mask = [1]*len(input_ids)
    # NOTE(review): the mask built below is never passed to generate(), and
    # padding_len goes negative if the prompt exceeds max_seq_len — this
    # mask is effectively unused.
    padding_len = max_seq_len - len(input_ids)
    input_ids = input_ids #+ pad_tok*padding_len
    mask = mask + [0]*padding_len
    ids = torch.tensor(input_ids,dtype = torch.long).to(device).unsqueeze(0)
    mask = torch.tensor(mask,dtype = torch.long).to(device).unsqueeze(0)
    #print(ids[0])
    # beam search + nucleus sampling, forbidding repeated 2-grams
    sample_out = model.generate(ids, min_length = 30,max_length=max_len, pad_token_id=pad_tok,
                                top_p=0.85, early_stopping=True, do_sample=True, num_beams = 5, no_repeat_ngram_size = 2,num_return_sequences=1)
    print('Generated Text:\n\n',tokenizer.decode(sample_out[0],skip_special_tokens = True))
# + id="_m31rYeg8gc3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="78a53378-a696-46be-e8c2-bf9724c3659e"
generate_text('When the night',device = 'cuda')
| GPT2 Generator/Anime_Generator_GPT2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from queue import Queue
import threading
from socketIO_client import SocketIO, BaseNamespace
import numpy as np
import time
# +
def run_parallel_in_threads(target, args_list):
    """Call ``target`` once per argument tuple, each call in its own thread,
    and collect the non-None return values.

    Parameters:
        target: callable to invoke.
        args_list: iterable of argument tuples, one per thread.

    Returns a list of results in arbitrary (completion) order.
    """
    result = Queue()

    def task_wrapper(*args):
        result.put(target(*args))

    threads = [threading.Thread(target = task_wrapper, args = args) for args in args_list]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # Bug fix: the original used filter(None, ...), which also discarded
    # legitimate falsy results such as 0, '' or []. Only drop None.
    globalparas = []
    while not result.empty():
        item = result.get()
        if item is not None:
            globalparas.append(item)
    return globalparas
def get_time_5001(i):
    """Time one socket.io connect + emit against the server on port 5001.

    Returns [elapsed_seconds, i] so results can be matched to request ids.
    """
    last_time = time.time()
    live_namespace = SocketIO('http://localhost', 5001).define(BaseNamespace, '/live')
    live_namespace.emit('event',{'id':int(i)})
    return [time.time()-last_time, i]

def get_time_5000(i):
    """Time one socket.io connect + emit against the server on port 5000."""
    last_time = time.time()
    live_namespace = SocketIO('http://localhost', 5000).define(BaseNamespace, '/live')
    live_namespace.emit('event',{'id':int(i)})
    return [time.time()-last_time, i]

def get_time_80(i):
    """Time one socket.io connect + emit through the load balancer on port 80."""
    last_time = time.time()
    live_namespace = SocketIO('http://localhost/socket.io', 80).define(BaseNamespace, '/live')
    live_namespace.emit('event',{'id':int(i)})
    return [time.time()-last_time, i]
# -
live_namespace = SocketIO('http://localhost/socket.io', 80).define(BaseNamespace, '/live')
live_namespace.emit('event',{'id':int(0)})
def stress_test(socketio, count, per_second):
    """Fire ``count`` requests in concurrent batches and print per-batch
    latency statistics.

    Parameters:
        socketio: one of the get_time_* callables.
        count: total number of requests to send.
        per_second: number of concurrent threads per batch.
    """
    aranged = np.arange(count)
    for index in range(0,aranged.shape[0],per_second):
        batch = aranged[index:min(index + per_second,aranged.shape[0])]
        threads = []
        for k in batch:
            threads.append((k,))
        # run the whole batch concurrently; each result is [elapsed, id]
        outputs = run_parallel_in_threads(socketio, threads)
        total = 0
        for i in outputs:
            total += float(i[0])
        print('index %d, total time taken %f s, average time taken %f s'%(index, total, total / len(outputs)))
        # give the server a moment to drain between batches
        time.sleep(2)
stress_test(get_time_80, 50,10)
| simple-backend/11.flask-socketio-redis-nginx-loadbalancer/stress-test-socketio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="SbgUfOOzTTXi"
# # 데이터 시각화가 필요한 이유
#
# * 앤스콤 4분할 데이터(Anscombe's quartet)는 데이터 시각화의 중요성을 보여주는 전형적인 사례이다.
# * 영국의 프랭크 앤스콤이 데이터를 시각화 하지 않고 수치에만 의존할 때 발생하는 함정을 보여주기 위한 데이터 이다.
# * 해당 데이터는 4개의 그룹으로 구성되어 있으며 모든 데이터 그룹이 x, y 값을 가지고 있다.
# * 이 4개 그룹은 각각 평균, 분산, 상관관계, 회귀선이 같다는 특징이 있다.
# * 수치 결과만 보면 4개 그룹의 데이터가 서로 같을 것이라고 착각할 수 있다.
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" executionInfo={"elapsed": 1149, "status": "ok", "timestamp": 1593086085836, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="tVs2YbAFTTXj" outputId="ae50693d-bbdc-4f26-fdb5-1e3ed4029fef"
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# + colab={} colab_type="code" id="2-kgRh4_TTXl"
# Anscombe's quartet: columns dataset (group I-IV), x, y.
anscombe = pd.read_csv("../data/anscombe.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="code" executionInfo={"elapsed": 663, "status": "ok", "timestamp": 1592976166761, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="tv6FXmQITTXn" outputId="3bf1c7b5-fb17-4d86-8bd8-8495195a388d"
# Quick look at the first rows to confirm the structure.
anscombe.head()
# + [markdown] colab_type="text" id="NNLseDL7TTXq"
# **기술통계량을 확인해 보면 4개 그룹의 수치적 요약값이 같은 것을 알 수 있다.**
# + colab={"base_uri": "https://localhost:8080/", "height": 233} colab_type="code" id="DG8FwhmNTTXq" outputId="14053bbb-1b61-43f5-dfb5-a4ac1d2f88af"
# Per-group mean and standard deviation - numerically the four groups look alike.
anscombe.groupby('dataset').agg(['mean','std'])
# + [markdown] colab_type="text" id="tVnCSSbgTTXs"
# # 산점도로 시각화 해보기
# 각 그룹을 비교하기 위한 시각화
# + colab={} colab_type="code" id="5cod-KjOTTXt"
# Split the quartet into its four groups (reused by the subplot cell below).
dataset1 = anscombe[anscombe['dataset']=='I']
dataset2 = anscombe[anscombe['dataset']=='II']
dataset3 = anscombe[anscombe['dataset']=='III']
dataset4 = anscombe[anscombe['dataset']=='IV']
# + [markdown] colab_type="text" id="g2w55nEfD97k"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" executionInfo={"elapsed": 1302, "status": "ok", "timestamp": 1592976224673, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="fwHy2j_HTTXv" outputId="83ffbf96-6443-415d-b003-f98d45b66c60"
# 2x2 grid: one scatter panel per Anscombe group.
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
# 'o' draws markers only (no connecting line).
ax1.plot(dataset1['x'], dataset1['y'], 'o'); ax1.set_title('dataset1')
ax2.plot(dataset2['x'], dataset2['y'], 'o'); ax2.set_title('dataset2')
ax3.plot(dataset3['x'], dataset3['y'], 'o'); ax3.set_title('dataset3')
ax4.plot(dataset4['x'], dataset4['y'], 'o'); ax4.set_title('dataset4')
fig.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="L9KdQzcnTTXx"
# * 평균, 분산, 상관관계 등의 통계적 수치가 같아도 그래프의 형태는 다를 수 있다.
# * 데이터 분석시 수치에만 의존하면 잘못된 판단을 할 수 있다.
# + colab={} colab_type="code" id="reI6RmCKTTXx"
# + [markdown] colab_type="text" id="InfO8IncTTXz"
# # Matplotlib 실습
# 먼저 matplotlib.pyplot을 import한다. 실습에 필요한 numpy도 import한다.
# jupyter notebook에서 plot이 바로 표시 될 수 있도록 매직 명령어를 사용한다.
# > %matplotlib inline
# + colab={} colab_type="code" id="RjuQFx6YTTX0"
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# %matplotlib inline
# + [markdown] colab_type="text" id="xErdJGSeTTX1"
# 간단히 다음과 같은 sine 함수 그래프와 cosine함수 그래프를 그려보자.
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" executionInfo={"elapsed": 775, "status": "ok", "timestamp": 1592980938685, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="xuoFba2DTTX2" outputId="c97e2621-ba77-4444-81c0-28c0273d3722"
x = np.linspace(0,10,100) # 100 evenly spaced numbers between 0 and 10
plt.plot(x, np.sin(x), '-')   # solid line
plt.plot(x, np.cos(x), '--')  # dashed line
# + [markdown] colab_type="text" id="NsR0raolTTX4"
# subplot 기능을 활용하여 sine함수와 cosine함수 그래프를 다음과 같이 그려보자
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" executionInfo={"elapsed": 711, "status": "ok", "timestamp": 1592981027255, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="WXk1oBaPTTX4" outputId="1e1a7246-4199-4c84-e2ea-0e8ed235c7b3"
plt.figure()
plt.subplot(2,1,1)  # top panel
plt.plot(x, np.sin(x))
plt.subplot(212)    # bottom panel (212 is shorthand for subplot(2,1,2))
plt.plot(x, np.cos(x))
# + [markdown] colab_type="text" id="13h9nD_nTTX6"
# 플롯의 선 색상과 스타일을 다음과 같이 바꾸어서 그려보자.
# - color : r,g,b,c,y,m,k,w
# - linestyle : -, --, :, o, ^
# + colab={} colab_type="code" id="DeoqEZa-TTX7"
# ?plt.plot(x)
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" executionInfo={"elapsed": 695, "status": "ok", "timestamp": 1592981177095, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="x0jBM2rjTTX9" outputId="a09d1da6-fd0c-4175-caec-0e1583a54033"
# Format strings combine color and line style (e.g. 'r-' = red solid).
# Note: with a single data argument, values are plotted against their index.
plt.plot(x, 'r-')
plt.plot(x+1, 'g--')
plt.plot(x+2, 'b:')
plt.plot(x+3, 'y-.')
# + [markdown] colab_type="text" id="d33pqr99TTX-"
# plt.xlim()과 plt.ylim() 을 사용하여 x축과 y축을 제한 할 수 있다.
# sine 그래프를 그리고 x축을 -1 ~ 11로 하고 y축을 -1.5 ~ 1.5로 나타내 보자.
# + colab={"base_uri": "https://localhost:8080/", "height": 288} colab_type="code" executionInfo={"elapsed": 754, "status": "ok", "timestamp": 1592981204843, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="4XapZhpdTTX_" outputId="c619096e-b313-4869-f1d0-256fe860a911"
plt.plot(x, np.sin(x))
plt.xlim(-1, 11)    # x-axis range
plt.ylim(-1.5, 1.5) # y-axis range
# + [markdown] colab_type="text" id="ymk8SGbNTTYB"
# x축과 y축의 비율을 동일하게 맞출 수 있다.
# + colab={} colab_type="code" id="_jPR2EBNTTYB" outputId="3df5cfa9-5f42-4fb9-dd16-316a994edb0a"
plt.plot(x, np.sin(x))
plt.axis('equal')  # same scale on both axes
# + [markdown] colab_type="text" id="PpB40_qyTTYD"
# x축과 y축에 label을 추가하고 그래프의 제목을 추가한다.
# + colab={"base_uri": "https://localhost:8080/", "height": 313} colab_type="code" id="By55Q09CTTYE" outputId="cbcc62a4-3f81-4bac-a107-f7d7c6b5acbc"
# Title and axis labels for the sine curve.
plt.plot(x, np.sin(x))
plt.title('Sine Curve')
plt.xlabel('x')
plt.ylabel('sin(x)')
# + [markdown] colab_type="text" id="TuTmCpdoTTYG"
# Legend의 표시
# 각 그래프 선에 대해서 label을 붙이고 그래프에서 legend로 표시한다.
# legend loc 옵션 : 'upper right', 'upper left', 'lower left','lower right' 등으로 위치 지정 가능하다.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" id="nj0gsyj_TTYG" outputId="4aa0f252-4f49-4fe0-a603-07848096d934"
# Per-line labels become legend entries; loc fixes the legend position.
plt.subplot(2,1,1)
plt.plot(x, np.sin(x), '-g', label ='sin(x)')
plt.legend(loc='lower left')
plt.subplot(2,1,2)
plt.plot(x, np.cos(x), ':b', label ='cos(x)')
plt.legend()
# + [markdown] colab_type="text" id="iCEy69oBTTYI"
# ### 다중 서브플롯
# * 서로 다른 데이터 뷰를 비교
# * plt.subplot : 간단한 서브플롯의 그리드
# * plt.tight_layout() : 서브플롯 간격유지
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" executionInfo={"elapsed": 1553, "status": "ok", "timestamp": 1592981343238, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="xDtL7wI2TTYJ" outputId="e69326d0-b6ec-4d01-9bbf-ddd8f28a003c"
# 2x3 grid of panels, each labelled with its subplot index.
for i in range(1,7):
    plt.subplot(2,3,i)
    plt.text(0.5, 0.5, str(i), fontsize=18, ha='center')
plt.tight_layout() # re-space the axes so they do not overlap
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" id="LzOrSynmTTYM" outputId="6a801bb9-a9d3-486f-a5da-d9f0002ffa84"
# Mixed grid: two small panels on top, one full-width panel below.
plt.subplot(2,2,1); plt.plot(x, np.sin(x))
plt.subplot(2,2,2); plt.plot(x, np.cos(x))
plt.subplot(2,1,2); plt.plot(x)
plt.show()
# + [markdown] colab_type="text" id="hoMz2k9nTTYP"
# # Matplotlib로 다양한 그래프 그리기
# + colab={} colab_type="code" executionInfo={"elapsed": 2185, "status": "ok", "timestamp": 1593086140432, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="FGtfTPAqTTYP"
# Data Loading: iris flower measurements and restaurant tips datasets.
iris = pd.read_csv("../data/iris.csv")
tips = pd.read_csv("../data/tips.csv")
# + [markdown] colab_type="text" id="fsOxU7yUTTYR"
# ### matplotlib에서 히스토그램 그리기
# * 도수 분포표를 시각화 한 것.
# * 연속형 변수의 구간별 도수를 막대의 길이로 나타내는 그래프
# * 데이터 분석 단계에서 변수의 분포, 중심 경향, 퍼짐 정도, 치우침 정도 등을 한눈에 살펴볼 수 있는 시각화 종류로 히스토그램이 많이 사용
#
#
# bins=x 값을 변경해가면서 여러번 시도해 본다.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" executionInfo={"elapsed": 1176, "status": "ok", "timestamp": 1593087305537, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="5j9gQHlPTTYR" outputId="699bfdd3-8c49-4218-8717-98940f11d797"
# Distribution of sepal width over 10 bins (y-axis: frequency).
plt.hist(iris['sepal_width'], bins=10)
plt.show()
# + [markdown] colab_type="text" id="21l1g3veTTYT"
# Y축을 빈도수(frequency)가 아니라 density로 하고 싶을 때는 density=True 를 설정
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" executionInfo={"elapsed": 1233, "status": "ok", "timestamp": 1593086154232, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="Ed99Uk_zTTYT" outputId="b3e3cb1a-f643-4d5b-c39a-e1c3d0eea197"
# density=True normalises the bar areas so they integrate to 1.
plt.hist(iris['sepal_width'], bins=10, density=True)
plt.show()
# + [markdown] colab_type="text" id="Bczlb7KwTTYV"
# ### matplotlib으로 막대 그래프 그리기
# 요일(day)별로 tip 의 합계를 구해서 막대 그래프로 비교
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" executionInfo={"elapsed": 1153, "status": "ok", "timestamp": 1593087670860, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="o9AKGwEGXvt9" outputId="cf4ca75e-79a9-4672-8c61-7929bbcefaee"
import numpy as np
# Sum of tips per day, sorted ascending so the bar chart is ordered by size.
tips_sum_by_day = tips.groupby('day').tip.sum().sort_values() # sort bars in size order
tips_sum_by_day
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" executionInfo={"elapsed": 1130, "status": "ok", "timestamp": 1593087674112, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="D3qcAH5uTTYW" outputId="969c46fa-0e01-40c3-f26b-888f0aea52f4"
label = tips_sum_by_day.index  # day names, ordered by total tip
index = np.arange(len(label))  # numeric positions (reused by the barh cell below)
plt.bar(label, tips_sum_by_day)
plt.show()
# + [markdown] colab_type="text" id="M3oIDUuLTTYX"
# **옆으로 누운 막대그래프는 plt.barh() 함수를 사용**
# + colab={"base_uri": "https://localhost:8080/", "height": 304} colab_type="code" executionInfo={"elapsed": 1181, "status": "ok", "timestamp": 1593087687969, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="bG_LuvaaTTYY" outputId="e77368b6-fdf0-463c-a252-e21a0ff9996f"
# Horizontal version of the daily-tip totals.
plt.barh(index, tips_sum_by_day)
plt.title('Sum of Tips by Day', fontsize=18)
plt.ylabel('Day', fontsize=15)
plt.xlabel('Sum of Tips', fontsize=15)
plt.yticks(index, label, fontsize=13, rotation=0)
plt.show()
# + [markdown] colab_type="text" id="VlsDYlyRTTYZ"
# ### 그룹으로 나누어 비교하는 막대그래프
# 성별(sex)에 따라 요일별 tip의 평균을 비교한다.
# + colab={"base_uri": "https://localhost:8080/", "height": 267} colab_type="code" executionInfo={"elapsed": 1170, "status": "ok", "timestamp": 1593087695053, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="AhkNl6pATTYa" outputId="f94cba5e-fbcd-4679-bed5-134060f6ab1f"
# Mean tip per day for each sex, drawn as side-by-side bars.
label = ['Thur', 'Fri', 'Sat', 'Sun']
index = np.arange(len(label))
# groupby sorts string keys alphabetically (Fri, Sat, Sun, Thur), which would
# not match the weekday order of `label` above and silently mislabel the bars.
# reindex forces the intended order (a no-op if the order already matches,
# e.g. when 'day' is an ordered categorical).
tips_mean_by_day_male = tips[tips['sex'] == 'Male'].groupby('day').tip.mean().reindex(label)
tips_mean_by_day_female = tips[tips['sex'] == 'Female'].groupby('day').tip.mean().reindex(label)
plt.bar(index, tips_mean_by_day_male, width=0.4, label='Male')
# NOTE(review): with width 0.4 and offset 0.5 the tick sits under the Male bar
# rather than centred between the pair - confirm this is the intended look.
plt.bar(index+0.5, tips_mean_by_day_female, width=0.4, label='Female')
plt.xticks(index, label)
plt.legend()
plt.show()
# + [markdown] colab_type="text" id="3l5uMDMOTTYb"
# ### 누적막대그래프로 표시하기
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" executionInfo={"elapsed": 1161, "status": "ok", "timestamp": 1593087195560, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="mnreh_LzTTYc" outputId="a8ef962f-13d6-4766-9df2-1469e5940bf5"
# summary by group
# Stacked variant: Female bars sit on top of the Male bars (bottom=...).
p1 = plt.bar(index, tips_mean_by_day_male, color='b', alpha=0.5, label='Male')
p2 = plt.bar(index, tips_mean_by_day_female, color='r', alpha=0.5,
             bottom=tips_mean_by_day_male, label='Female')
plt.xticks(index, label)
plt.legend()
plt.show()
# + [markdown] colab_type="text" id="NOMN7oHMTTYe"
# ### Matplotlib를 이용하여 산점도 그리기
# + colab={"base_uri": "https://localhost:8080/", "height": 304} colab_type="code" executionInfo={"elapsed": 1283, "status": "ok", "timestamp": 1593087200836, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="ydKw1KhwTTYf" outputId="dd9c9f57-08c6-410d-d31d-c088eee1afc3"
# Scatter plot drawn with plt.plot: linestyle='none' plus a marker.
# Column names are resolved against the DataFrame passed via data=.
plt.plot('petal_length', # x
         'petal_width', # y
         data=iris,
         linestyle='none',
         marker='o',
         markersize=10,
         color='blue',
         alpha=0.5)
plt.title('Scatter Plot of iris by matplotlib', fontsize=20)
plt.xlabel('Petal Length', fontsize=14)
plt.ylabel('Petal Width', fontsize=14)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1173, "status": "ok", "timestamp": 1593087205615, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="O8NC1qNCTTYg" outputId="0824e26b-67f0-4da4-bd0a-2bd2a2240e66"
# Scatter plot per group: one series (and legend entry) per iris species.
groups = iris.groupby('species')
fig, ax = plt.subplots()
for name, group in groups:
    ax.plot(group.petal_length,
            group.petal_width,
            marker='o',
            linestyle='',
            label=name)
ax.legend(fontsize=12, loc='upper left') # legend position
# + [markdown] colab_type="text" id="UA557hqcTTYi"
# ### plot()함수와 scatter()함수 차이
# plot()대신에 scatter()를 써서 산점도를 그리는 것도 가능하다.
#
# plot()함수 역시 마커만 보여주는 플롯이 가능하고 속도도 scatter()함수에 비해 더 빠르다.
# 하지만, 마커를 그리는데 있어 scatter()함수는 plot()함수에서 제공하지 않는 높은 자유도를 제공하며 다양한 플롯을 그리는 것이 가능하다.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1138, "status": "ok", "timestamp": 1593087213578, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="P6_tSA0QTTYj" outputId="527e0a2e-d813-4791-cea7-e0bf6ef72413"
# 15 random integer points in [1, 10) drawn with plt.plot markers.
x = np.random.randint(1,10,15)
y = np.random.randint(1,10,15)
plt.plot(x, y,'o')
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1206, "status": "ok", "timestamp": 1593087221519, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="XGtV4WfJTTYk" outputId="b1548564-cf4c-4f69-beb1-780af13ee94d"
# scatter() allows per-point size (s) and colour (c), unlike plot().
size = np.random.randint(1,500,15)
color = np.random.randint(1,100,15)
plt.scatter(x,y, s=size, c=color)
# + colab={"base_uri": "https://localhost:8080/", "height": 313} colab_type="code" executionInfo={"elapsed": 1236, "status": "ok", "timestamp": 1593087225710, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="e4ls3cRMTTYm" outputId="325a5894-9b8b-4da9-e85f-74b96177da6f"
# Object-oriented API: the same scatter via an explicit Figure/Axes pair.
scatter_plot = plt.figure()
axes1 = scatter_plot.add_subplot(1, 1, 1)
axes1.scatter(tips['total_bill'], tips['tip'])
axes1.set_title('Scatterplot of Total Bill vs Tip')
axes1.set_xlabel('Total Bill')
axes1.set_ylabel('Tip')
# + [markdown] colab_type="text" id="Gq7i2qqwTTYo"
# ### 상자 그래프 그리기
# 하나의 연속형 변수에 대해서 분포 형태, 퍼짐정도, 이상치 여부 등을 시각화하고, 하나의 그룹 혹은 여러개의 그룹 간 비교하는데 유용하다.
# + colab={"base_uri": "https://localhost:8080/", "height": 313} colab_type="code" executionInfo={"elapsed": 1226, "status": "ok", "timestamp": 1593087231793, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="wChIZ3mLTTYp" outputId="8877a823-78df-44ec-8ffb-0974c313f563"
# Tip distribution per sex as a box plot (median, quartiles, outliers).
boxplot = plt.figure()
axes1 = boxplot.add_subplot(1, 1, 1)
axes1.boxplot(
    [tips[tips['sex'] == 'Female']['tip'],
     tips[tips['sex'] == 'Male']['tip']],
    labels=['Female', 'Male'])
axes1.set_xlabel('Sex')
axes1.set_ylabel('Tip')
axes1.set_title('Boxplot of Tips by Sex')
# + [markdown] colab_type="text" id="P_m0iMFtTTYr"
# # 등고선 그래프 contour
#
# * 등고선 그래프로 3차원 데이터를 시각화 할 수 있다.
# * contour는 등고선만 표시하고 contourf는 색깔로 표시한다.
#
# ### meshgrid와 contour
# * plt.contour(X, Y, Z)의 X, Y, Z는 모두 2차원의 np.array이다.
# * X, Y 를 meshgrid를 사용하여 생성한다.
# + colab={"base_uri": "https://localhost:8080/", "height": 281} colab_type="code" executionInfo={"elapsed": 1350, "status": "ok", "timestamp": 1593087236292, "user": {"displayName": "\uc774\uc120\ud654", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGEcSuxiespWUAaMC6eXljdm2fXmv29ZXuZ14n=s64", "userId": "08084686575025891086"}, "user_tz": -540} id="cKqyGJi3TTYr" outputId="3d3596e5-2e66-4743-bfb7-81ddcdd5a895"
def f(x, y):
    """Scalar field for the contour demo: a polynomial ridge damped by a Gaussian."""
    envelope = np.exp(-x ** 2 - y ** 2)
    ridge = 1 - x / 2 + x ** 5 + y ** 3
    return ridge * envelope
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
XX, YY = np.meshgrid(x, y)  # 2-D coordinate grids, each of shape (n, n)
ZZ = f(XX, YY)
plt.title("Contour plots")
plt.contourf(XX, YY, ZZ, alpha=.75, cmap='jet')  # filled contours
plt.contour(XX, YY, ZZ, colors='black')          # contour lines on top
plt.show()
# + colab={} colab_type="code" id="lxEDauWwTTYt"
| 04Visualization/01Matplotlib시각화.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Implementing FunkSVD - Solution
#
# In this notebook we will take a look at writing our own function that performs FunkSVD, which will follow the steps you saw in the previous video. If you find that you aren't ready to tackle this task on your own, feel free to skip to the following video where you can watch as I walk through the steps.
#
# To test our algorithm, we will run it on the subset of the data you worked with earlier. Run the cell below to get started.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import sparse
import svd_tests as t
# %matplotlib inline
# Read in the datasets
movies = pd.read_csv('data/movies_clean.csv')
reviews = pd.read_csv('data/reviews_clean.csv')
# Drop the CSVs' saved index column.
del movies['Unnamed: 0']
del reviews['Unnamed: 0']
# Create user-by-item matrix (rows: user_id, columns: movie_id, values: rating);
# .max() collapses any duplicate (user, movie) ratings to the highest one.
user_items = reviews[['user_id', 'movie_id', 'rating', 'timestamp']]
user_by_movie = user_items.groupby(['user_id', 'movie_id'])['rating'].max().unstack()
# Create data subset: four movies, keeping only users who rated all four.
user_movie_subset = user_by_movie[[73486, 75314, 68646, 99685]].dropna(axis=0)
ratings_mat = np.matrix(user_movie_subset)
print(ratings_mat)
# -
# `1.` You will use the **user_movie_subset** matrix to show that your FunkSVD algorithm will converge. In the below cell, use the comments and document string to assist you as you complete writing your own function to complete FunkSVD. You may also want to try to complete the funtion on your own without the assistance of comments. You may feel free to remove and add to the function in any way that gets you a working solution!
#
# **Notice:** There isn't a sigma matrix in this version of matrix factorization.
def FunkSVD(ratings_mat, latent_features=4, learning_rate=0.0001, iters=100):
    '''
    This function performs matrix factorization using a basic form of FunkSVD with no regularization
    INPUT:
    ratings_mat - (numpy array) a matrix with users as rows, movies as columns, and ratings as values
    latent_features - (int) the number of latent features used
    learning_rate - (float) the learning rate
    iters - (int) the number of iterations
    OUTPUT:
    user_mat - (numpy array) a user by latent feature matrix
    movie_mat - (numpy array) a latent feature by movie matrix
    '''
    # Set up useful values to be used through the rest of the function
    n_users = ratings_mat.shape[0]
    n_movies = ratings_mat.shape[1]
    # Only observed (non-NaN) cells count toward the reported mean squared error.
    num_ratings = np.count_nonzero(~np.isnan(ratings_mat))
    # Initialize the user and movie latent-factor matrices with random values in [0, 1).
    user_mat = np.random.rand(n_users, latent_features)
    movie_mat = np.random.rand(latent_features, n_movies)
    # header for running results
    print("Optimization Statistics")
    print("Iterations | Mean Squared Error ")
    # stochastic gradient descent: one full pass over the observed ratings per iteration
    for iteration in range(iters):
        # reset the accumulated sum of squared errors for this pass
        sse_accum = 0
        # For each user-movie pair
        for i in range(n_users):
            for j in range(n_movies):
                # if the rating exists (NaN > 0 is False, so missing cells are skipped)
                if ratings_mat[i, j] > 0:
                    # error = actual rating minus current prediction (dot of latent vectors)
                    diff = ratings_mat[i, j] - np.dot(user_mat[i, :], movie_mat[:, j])
                    # Keep track of the sum of squared errors for the matrix
                    sse_accum += diff**2
                    # move both latent vectors along the gradient that shrinks the error
                    for k in range(latent_features):
                        user_mat[i, k] += learning_rate * (2*diff*movie_mat[k, j])
                        movie_mat[k, j] += learning_rate * (2*diff*user_mat[i, k])
        # print results for iteration
        print("%d \t\t %f" % (iteration+1, sse_accum / num_ratings))
    return user_mat, movie_mat
# `2.` Try out your function on the **user_movie_subset** dataset. First try 4 latent features, a learning rate of 0.005, and 10 iterations. When you take the dot product of the resulting U and V matrices, how does the resulting **user_movie** matrix compare to the original subset of the data?
# Short run: 4 latent features, lr 0.005, only 10 iterations.
user_mat, movie_mat = FunkSVD(ratings_mat, latent_features=4, learning_rate=0.005, iters=10)
# Compare the reconstruction against the original ratings.
print(np.dot(user_mat, movie_mat))
print(ratings_mat)
# **The predicted ratings from the dot product are already starting to look a lot like the original data values even after only 10 iterations. You can see some extreme low values that are not captured well yet. The 5 in the second to last row in the first column is predicted as an 8, and the 4 in the second row and second column is predicted to be a 7. Clearly the model is not done learning, but things are looking good.**
# `3.` Let's try out the function again on the **user_movie_subset** dataset. This time we will again use 4 latent features and a learning rate of 0.005. However, let's bump up the number of iterations to 250. When you take the dot product of the resulting U and V matrices, how does the resulting **user_movie** matrix compare to the original subset of the data? What do you notice about your error at the end of the 250 iterations?
# Longer run: 250 iterations can drive the training MSE to ~0 on this tiny subset.
user_mat, movie_mat = FunkSVD(ratings_mat, latent_features=4, learning_rate=0.005, iters=250)
print(np.dot(user_mat, movie_mat))
print(ratings_mat)
# **In this case, we were able to completely reconstruct the item-movie matrix to obtain an essentially 0 mean squared error. I obtained 0 MSE on iteration 165.**
# The last time we placed an **np.nan** value into this matrix the entire svd algorithm in python broke. Let's see if that is still the case using your FunkSVD function. In the below cell, I have placed a nan into the first cell of your numpy array.
#
# `4.` Use 4 latent features, a learning rate of 0.005, and 250 iterations. Are you able to run your SVD without it breaking (something that was not true about the python built in)? Do you get a prediction for the nan value? What is your prediction for the missing value? Use the cells below to answer these questions.
# Knock out one known rating to simulate a missing value.
ratings_mat[0, 0] = np.nan
ratings_mat
# run SVD on the matrix with the missing value
user_mat, movie_mat = FunkSVD(ratings_mat, latent_features=4, learning_rate=0.005, iters=250)
# FunkSVD trains only on observed entries, so the NaN cell still receives
# a prediction from the learned latent factors.
preds = np.dot(user_mat, movie_mat)
print("The predicted value for the missing rating is {}:".format(preds[0,0]))
print()
print("The actual value for the missing rating is {}:".format(ratings_mat[0,0]))
print()
assert np.isnan(preds[0,0]) == False
print("That's right! You just predicted a rating for a user-movie pair that was never rated!")
print("But if you look in the original matrix, this was actually a value of 10. Not bad!")
# Now let's extend this to a more realistic example. Unfortunately, running this function on your entire user-movie matrix is still not something you likely want to do on your local machine. However, we can see how well this example extends to 1000 users. In the above portion, you were using a very small subset of data with no missing values.
#
# `5.` Given the size of this matrix, this will take quite a bit of time. Consider the following hyperparameters: 4 latent features, 0.005 learning rate, and 20 iterations. Grab a snack, take a walk, and this should be done running in a bit.
# +
# Setting up a matrix of the first 1000 users with movie ratings
# (mostly NaN - only the observed ratings are trained on).
first_1000_users = np.matrix(user_by_movie.head(1000))
# perform funkSVD on the matrix of the top 1000 users
user_mat, movie_mat = FunkSVD(first_1000_users, latent_features=4, learning_rate=0.005, iters=20)
# -
# `6.` Now that you have a set of predictions for each user-movie pair. Let's answer a few questions about your results. Provide the correct values to each of the variables below, and check your solutions using the tests below.
# +
# How many actual ratings exist in first_1000_users (non-NaN cells)
num_ratings = np.count_nonzero(~np.isnan(first_1000_users))
print("The number of actual ratings in the first_1000_users is {}.".format(num_ratings))
print()
# How many ratings did we make for user-movie pairs that didn't have ratings
# (total cells minus observed cells)
ratings_for_missing = first_1000_users.shape[0]*first_1000_users.shape[1] - num_ratings
print("The number of ratings made for user-movie pairs that didn't have ratings is {}".format(ratings_for_missing))
# +
# Test your results against the solution
assert num_ratings == 10852, "Oops! The number of actual ratings doesn't quite look right."
assert ratings_for_missing == 31234148, "Oops! The number of movie-user pairs that you made ratings for that didn't actually have ratings doesn't look right."
# Make sure you made predictions on all the missing user-movie pairs
preds = np.dot(user_mat, movie_mat)
assert np.isnan(preds).sum() == 0
print("Nice job! Looks like you have predictions made for all the missing user-movie pairs! But I still have one question... How good are they?")
# -
| 2_Implementing_FunkSVD_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import tensorflow as tf
import datetime
from deep_tools import f
from deep_tools import DataGenerator

# Tab-separated user logs: registrations, app launches, video creations, actions.
register=pd.read_csv('./data/user_register_log.txt',sep='\t',names=['user_id','register_day','register_type','device_type'])
launch=pd.read_csv('./data/app_launch_log.txt',sep='\t',names=['user_id','launch_day'])
create=pd.read_csv('./data/video_create_log.txt',sep='\t',names=['user_id','create_day'])
activity=pd.read_csv('./data/user_activity_log.txt',sep='\t',names=['user_id','act_day','page','video_id','author_id','act_type'])
# -
#参数
n_features=12
n_hu=5
n_device=50
n_register=7
n_days=31
data_generator=DataGenerator(register,launch,create,activity)
# +
# device_dict: user_id -> device-frequency rank (0 = most common device type).
# Rank is derived by counting users per device_type and sorting descending.
device_table=register.groupby(['device_type'],as_index=False).agg({'user_id':'count'})
device_table=device_table.sort_values(by=['user_id'],ascending=False)
device_table['device_type_map']=np.arange(len(device_table))
device_table.drop('user_id',axis=1,inplace=True)
register=pd.merge(register,device_table)
device_dict={row[0]:row[-1] for index,row in register.iterrows()}
# register_dict: user_id -> register_type
register_dict={row[0]:row[2] for index,row in register.iterrows()}
# +
tf.reset_default_graph()
tf.set_random_seed(10)
#Variables
with tf.variable_scope('test4'):
    # Variables and placeholder inputs
    lr=tf.placeholder(tf.float32,[],name='learning_rate')
    W_out=tf.get_variable('W_out',[n_hu,1])
    b_out=tf.get_variable('b_out',[1])
    x=tf.placeholder(tf.float32,[None,None,n_features])   # (batch, time, features)
    y=tf.placeholder(tf.float32,[None,None])              # (batch, time) binary labels
    batch_size=tf.shape(x)[0]
    seq_length=tf.shape(x)[1]
    PR_input=tf.placeholder(tf.float32,[None,None,1])     # NOTE(review): defined but unused below
    device_input=tf.placeholder(tf.int32,[None])
    register_input=tf.placeholder(tf.int32,[None])
    date_input=tf.placeholder(tf.int32,[None])
    # One scalar intercept per device rank / register type / calendar day,
    # all initialised to zero and added to the logits below.
    device_embedding=tf.get_variable('device_embedding',[n_device,1],initializer=tf.zeros_initializer)
    register_embedding=tf.get_variable('register_embedding',[n_register,1],initializer=tf.zeros_initializer)
    date_embedding=tf.get_variable('date_embedding',[n_days,1],initializer=tf.zeros_initializer)
    # RNN layer
    cell=tf.nn.rnn_cell.GRUCell(n_hu)
    initial_state = cell.zero_state(batch_size, dtype=tf.float32)
    outputs, state = tf.nn.dynamic_rnn(cell, x,
                                       initial_state=initial_state)
    # Output layer: project every timestep's hidden state to a single logit.
    outputs=tf.reshape(outputs,[-1,n_hu])
    logits=tf.matmul(outputs,W_out)+b_out
    logits=tf.reshape(logits,tf.stack([batch_size,seq_length]))
    device_intercept=tf.nn.embedding_lookup(device_embedding,device_input)
    register_intercept=tf.nn.embedding_lookup(register_embedding,register_input)
    date_intercept=tf.nn.embedding_lookup(date_embedding,date_input)
    date_intercept=tf.reshape(date_intercept,tf.stack([1,seq_length]))
    # Final per-user, per-day logits = RNN output plus the three intercepts
    # (broadcast over batch / time as appropriate).
    logits=logits+device_intercept+register_intercept+date_intercept
# +
# local_train: last 14 days held out for validation
logits_local_train=logits[:,:-14]
label_local_train=y[:,:-14]
# Small L2 penalty over all trainable variables, shared by both objectives.
regularizer=tf.contrib.layers.l2_regularizer(0.00001)
penalty=tf.contrib.layers.apply_regularization(regularizer,tf.trainable_variables())
obj_local=tf.losses.sigmoid_cross_entropy(label_local_train,logits_local_train)+penalty
optimizer=tf.train.AdamOptimizer(lr)
step_local=optimizer.minimize(obj_local)
# local_test: evaluate on the day 8 steps from the sequence end
logits_local_test=logits[:,-8]
label_local_test=y[:,-8]
# online_train: last 7 days held out for the submission prediction
logits_online_train=logits[:,:-7]
label_online_train=y[:,:-7]
obj_online=tf.losses.sigmoid_cross_entropy(label_online_train,logits_online_train)+penalty
# NOTE(review): `optimizer` is rebound here; each objective still gets its own
# minimize op (step_local / step_online), so training is unaffected.
optimizer=tf.train.AdamOptimizer(lr)
step_online=optimizer.minimize(obj_online)
# online_test: predict the final timestep
logits_online_test=logits[:,-1]
# -
# Create the session and initialize all graph variables.
sess=tf.Session()
sess.run(tf.global_variables_initializer())
def test(strategy='local'):
    """Evaluate the current model.

    Prints the aggregate training loss/accuracy, then collects per-user
    predicted probabilities on the test portion.

    strategy - 'local': last 14 days held out, test day is labelled, so an
               offline score is printed via f(result);
               'online': last 7 days held out, unlabelled submission set.
    Returns a DataFrame with user_id and prob (plus label for 'local').
    """
    if strategy=='local':
        n_NA=14
        # day-of-month index fed to the date embedding for each timestep
        date_seq=[31]+list(range(2,16))+[16]*15
        variables_1=[obj_local,logits_local_train,label_local_train]
        variables_2=[logits_local_test,label_local_test]
    else:
        n_NA=7
        date_seq=[31]+list(range(2,23))+[23]*8
        variables_1=[obj_online,logits_online_train,label_online_train]
        variables_2=logits_online_test
    obs_count,cum_loss,correct=0,0,0
    user,prob,real=[],[],[]
    # Training loss: accumulate loss and accuracy over every training batch,
    # weighting each batch by its number of (length - n_NA) labelled timesteps.
    for length,id_list,data_x,data_y in zip(*data_generator.get_set(strategy,'train')):
        _obj,_logits_train,_label_train=sess.run(variables_1,
                      feed_dict={x:data_x,
                                 y:data_y,
                                 device_input:[device_dict[u] for u in id_list],
                                 register_input:[register_dict[u] for u in id_list],
                                 date_input:date_seq[-length:],
                                 lr:0.001})
        obs_count+=(length-n_NA)*len(id_list)
        cum_loss+=_obj*(length-n_NA)*len(id_list)
        correct+=np.sum((1*(_logits_train>0)==_label_train))
    # Test predictions: convert the test-day logits to probabilities.
    for length,id_list,data_x,data_y in zip(*data_generator.get_set(strategy,'test')):
        _=sess.run(variables_2,
                      feed_dict={x:data_x,
                                 y:data_y,
                                 device_input:[device_dict[u] for u in id_list],
                                 register_input:[register_dict[u] for u in id_list],
                                 date_input:date_seq[-length:],
                                 lr:0.001})
        if strategy=='local':
            _logits_test,_label_test=_
            real+=list(_label_test)
        else:
            _logits_test=_
        user+=list(id_list)
        prob+=list(1/(1+np.exp(-_logits_test.reshape([-1]))))  # sigmoid
    # Report training loss / accuracy
    print('train_loss',cum_loss/obs_count,correct/obs_count)
    # Report offline test score when labels are available
    if strategy=='local':
        result=pd.DataFrame({'user_id':user,'prob':prob,'label':real})
        print('test_score:',f(result))  # f: scoring function from deep_tools
    else:
        result=pd.DataFrame({'user_id':user,'prob':prob})
    return result
def train(strategy='local',n_obs=1000,step=1000,lr_feed=0.01):
    """Run `step` SGD iterations on mini-batches of `n_obs` users.

    strategy selects the local/online objective; lr_feed is fed into the
    `lr` placeholder of the AdamOptimizer.
    """
    if strategy=='local':
        date_seq=[31]+list(range(2,16))+[16]*15
        variables=[step_local,obj_local,label_local_train,logits_local_train]
    else:
        date_seq=[31]+list(range(2,23))+[23]*8
        variables=[step_online,obj_online,label_online_train,logits_online_train]
    for i in range(step):
        length,id_list,data_x,data_y=data_generator.next_batch(strategy,n_obs)
        _,los,lab,log=sess.run(variables,
                               feed_dict={x:data_x,
                                          y:data_y,
                                          device_input:[device_dict[u] for u in id_list],
                                          register_input:[register_dict[u] for u in id_list],
                                          date_input:date_seq[-length:],
                                          lr:lr_feed})
# re-initialize all variables so the annealing runs below start from scratch
sess.run(tf.global_variables_initializer())
# +
def cos_annealing_local(epoch=5):
    """Run `epoch` rounds of (high-lr, low-lr) training and merge each
    round's local predictions into one DataFrame (columns prob0..prob{epoch-1})."""
    all_result=None
    for i in range(epoch):
        train('local',n_obs=1000,step=2000,lr_feed=0.01)
        train('local',n_obs=1000,step=2000,lr_feed=0.001)
        result=test('local')
        print(sess.run(penalty))
        # Bug fix: the positional rename `result.columns=['label','prob%s'%i,'user_id']`
        # relied on the alphabetical dict-key column order of pandas < 0.23; on
        # modern pandas (insertion order: user_id, prob, label) it silently
        # mislabelled the columns. Rename only the probability column instead.
        result=result.rename(columns={'prob':'prob%s'%i})
        if i==0:
            all_result=result
        else:
            # merge on the shared columns (user_id, label)
            all_result=pd.merge(all_result,result)
    return all_result
def cos_annealing_online(epoch=5):
    """Same annealing schedule as `cos_annealing_local`, but trains on the
    full online window and collects submission probabilities per round."""
    all_result=None
    for i in range(epoch):
        train('online',n_obs=1000,step=2000,lr_feed=0.01)
        train('online',n_obs=1000,step=2000,lr_feed=0.001)
        result=test('online')
        print(sess.run(penalty))
        # Bug fix: `result.columns=['prob%s'%i,'user_id']` assumed the
        # alphabetical column order of pandas < 0.23 and swaps the two columns
        # on modern pandas. Rename the probability column by name instead.
        result=result.rename(columns={'prob':'prob%s'%i})
        if i==0:
            all_result=result
        else:
            # merge on the shared user_id column
            all_result=pd.merge(all_result,result)
    return all_result
# -
# offline evaluation (translated from: 线下测试)
print(datetime.datetime.now())
result=cos_annealing_local(5)
print(datetime.datetime.now())
# online submission (translated from: 线上提交)
# NOTE: the local `result` above is intentionally discarded here — the local
# run is only used for the printed test_score.
print(datetime.datetime.now())
result=cos_annealing_online(5)
print(datetime.datetime.now())
# ensemble: average the probabilities of the 5 annealing rounds (was: 融合)
result['prob']=(result.prob0+result.prob1+result.prob2+result.prob3+result.prob4)/5
result.sort_values(by='prob',ascending=False,inplace=True)
result=result.reset_index(drop=True)
# NOTE(review): `.loc` slicing is inclusive, so this writes 24801 rows, not
# 24800 — confirm the intended submission size.
result.loc[:24800,['user_id']].to_csv('output/result.csv',header=False,index=False)
| deep_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inhalt
#
# Das dritte Kapitel geht auf die Steuerung des Kontrollflusses von Programmen durch Bedingungen und Schleifen ein und deckt die folgenden Konzepte ab:
# - if, elif, else
# - Blöcke
# - while Schleifen
# - for Schleifen
#
#
# # Bedingte Ausführung
#
# Ein Kernkonzept vieler Programmiersprachen ist die bedingte Ausführung von Teilen des Quelltextes. Hierdurch lassen sich Entscheidungen treffen und Abläufe definieren. In Python gibt es hierfür, wie in vielen anderen Sprachen auch, das Schlüsselwort ```if```.
print("do something")
# a literal True condition: its block always runs
if True:
    print("executed because the condition is true")
# a literal False condition: its block never runs
if False:
    print("not executed because the condition is false")
# Natürlich macht ein Beispiel wie oben, wo fest "True" oder "False" im Quelltext steht, in der Regel keinen Sinn. Stattdessen nutzt man Bedingungen.
# +
value = int(input("Please type an integer: "))
# even test via remainder; odd input produces no output in this example
if value%2==0:
    print("The number is even.")
# -
# Häufig will man das eine Aktion ausgeführt, falls eine Bedingung erfüllt ist, und eine andere, falls die Bedingung nicht erfüllt ist. Hierzu gibt es das Schlüsselwort ```else```. Der Block nach dem ```else``` wird nur dann ausgeführt, falls die Bedingung nicht zu trifft. Man hat also ein "entweder/oder" Konstrukt.
# +
value = int(input("Please type an integer: "))
if value%2==0:
    print("The number is even.")
else:
    # reached for every value with a non-zero remainder
    print("The number is odd.")
# -
# Will man zwischen mehr als zwei Fällen unterscheiden, gibt es in anderen Programmiersprachen zum Beispiel ```switch``` Anweisungen oder das hintereinanderschalten von mehreren if/else blöcken (```else if```). In Python gibt es einen ähnlichen Ansatz durch das Schlüsselwort ``elif``. Hierdurch lassen sich mehrere Bedingungen kombinieren. Man beginnt mit einem ```if```, dann folgen beliebig viele ```elif```s, zuletzt kann man auch noch ein ```else``` benutzen.
# +
value = int(input("Please type an integer: "))
if value<0:
    print("The number is negative.")
elif value<10:
    # fixed typo in the message ("postive"); note that 0 also lands here
    print("The number is positive and less than 10.")
elif value<100:
    # fixed off-by-one in the message: this branch includes 10 itself
    print("The number is greater than or equal to 10 and less than 100.")
else:
    print("The number is greater than or equal to 100.")
# -
# # Blöcke
#
# Als Nebenprodukt der ```if``` Anweisungen wurden oben bereits Blöcke verwendet. Blöcke sind Quelltextteile, die sequentiell abgearbeitet werden. In Java werden Blöcke zum Beispiel durch Klammerung definiert (```{}```). In Python wird auf derartige Klammern verzichtet. Stattdessen zählt die Einrückung. Zeilen in der gleichen Einrückungstiefe gehören zum gleichen Block. Üblicherweise verwendet man [vier Leerzeichen pro Block als Einrückungstiefe](https://www.python.org/dev/peps/pep-0008/#id17), eine beliebige Anzahl von Leerzeichen oder auch Tabulatoren ist jedoch möglich. Wichtige zu beachtende Eigenschaften bezüglich der Einrückung in Python sind:
# - Es müssen entweder Leerzeichen oder Tabulatoren für die Einrückung verwendet werden. Beides Gleichzeitig ist nicht erlaubt.
# - Innerhalb eines Blockes muss alles die gleiche Einrückung haben.
# - Auch wenn vieles erlaubt ist, sollte man sich strikt an die "vier Leerzeichen" Vorgabe halten, da Quelltext sonst schnell unleserlich wird.
#
# Man kann Blöcke ineinander verschachteln, wenn man zum Beispiel innerhalb eines Blocks eine weitere Bedingung überprüfen möchte.
value = int(input("Please type an integer: "))
if value%2==0:
    print("The number is even.")
    # nested block: only evaluated for even numbers
    if value<0:
        print("The number is also negative.")
# Komplett leere Blöcke, also Blöcke ohne jeglichen Quelltext sind in Python nicht möglich. Dies folgt direkt aus der Tatsache, dass Blöcke über die Einrückung des Quelltextes definiert werden. Benötigt man dennoch einen leeren Block, zum Beispiel aus syntaktischen Gründen oder weil man einen nicht behandelten Sonderfall kommentieren möchte, kann man dies mit Hilfe des Schlüsselworts ```pass``` umsetzen.
value = int(input("Please type an integer: "))
if value%2==0:
    print("The number is even.")
    if value<0:
        print("The number is also negative.")
    else:
        # `pass` keeps the otherwise-empty else block syntactically valid
        pass # implement handling of positive even numbers later
# # Schleifen
#
# Schleifen sind das Mittel der Wahl, um die gleichen Anweisungen mehrfach auszuführen. In der Regel ändern sich die Daten mit jeder Wiederholung der Ausführung einer Schleife. In Python gibt es zwei Arten von Schleifen: ```while``` Schleifen und ```for``` Schleifen.
#
# ## While Schleifen
#
# Das Konzept von ```while``` ist, dass ein Block solange wiederholt wird, bis eine festgelegte Bedingung erfüllt wird. Die Syntax ist daher grundsätzlich ähnlich zum ```if```.
print("I can count to ten!")
count = 0
# the body runs while the condition holds, i.e. until count reaches 10
while count<10:
    count += 1 # same as count=count+1
    print(count)
# Mit den bisher eingeführten Konzepten ist Python bereits Turingvollständig, man kann also theoretisch jeden Algorithmus programmieren. Das Beispiel unten zeigt, wie man mit Hilfe einer ```while``` Schleife und dem Heron-Verfahren die Quadratwurzel einer beliebigen ganzen Zahl $a$ schätzen kann. Hierbei schätzt man eine Folge von Zahlen, so dass
# \begin{equation*}
# x_{n+1} = \frac{1}{2} \cdot \left(x_n + \frac{a}{x_n} \right).
# \end{equation*}
# Die Folge konvergiert für eine beliebige positive Zahl als Startwert $x_1$ gegen $\sqrt{a}$. Die Abbruchbedingung der Schleife ist die Abweichung des Quadrats von $x_n$ von $a$.
value = float(input("Please type a positive number: ")) # input is a from Herons method
guess = 1 # guess is the x_n from Herons method
# iterate x_{n+1} = (x_n + a/x_n)/2 until guess^2 is within 1e-4 of a
while(abs(guess*guess-value)>0.0001): # abs() returns the absolute value
    guess = (1/2)*(guess+value/guess)
    print(guess)
# ## For Schleifen
#
# Der zweite Typ von Schleifen in Python sind ```for``` Schleifen. Mit ```for``` Schleifen iteriert man über Sequenzen von Daten, zum Beispiel Listen oder Mengen. Der Vorteil von ```for``` Schleifen ist, dass man sehr einfach Aktionen für alle Elemente einer Sequenz durchführen kann, zum Beispiel alle Elemente einer Liste ausgeben.
# for-each: `item` takes every value of the list in order
for item in [1, 2, 3, 4, 5, 6]:
    print(item)
# Da Zeichenketten auch Sequenzen sind, kann man auch über die einzelnen Buchstaben iterieren. Auf diese Art kann man zum Beispiel nur die Großbuchstaben ausgeben.
# strings are sequences too, so we can iterate over their characters
for char in "Hello World!":
    if char.isupper():
        print(char) # only print uppercase letters
# Die ```for``` Schleifen in Python sind *for-each* Schleifen, da für jedes Element einer Sequenz etwas durchgeführt wird. Dies ist anders als in *C-Style* ```for``` Schleifen, die über ein Inkrement und ein Abbruchkriterium definiert werden. In C-Style ```for``` Schleifen iteriert man daher häufig über den Index einer Sequenz. Will man zum Beispiel über zwei Listen gleichzeitig iterieren, muss man auch in Python auf ähnliche Art durch Sequenzen iterieren können. An dieser Stelle hilft die ```range()``` Funktion. Mit Hilfe von ```range``` lassen sich logische Sequenzen über Zahlen definieren, basierend auf einem Startwert, einem Stopwert, und einer Schrittgröße. Da es sich bei ```range``` um logische Sequenzen handelt, werden die einzelnen Werte nicht generiert. Das ist insbesondere bei einer besonders großen Anzahl von Werten innerhalb einer ```range``` hilfreich, da hierdurch viel Laufzeit und Arbeitsspeicher gespart wird. Will man die Werte einer ```range``` physisch im Arbeitsspeicher generieren, muss man diese in eine Liste umwandeln.
print(range(10)) # logical range of values between 0 (inclusive) and 10 (exclusive), values not created
print(list(range(10))) # creates a list from the range, values are created
print(list(range(1,10))) # range between 1 (inclusive) and 10 (exclusive)
print(list(range(1,10,2))) # range between 1 (inclusive) and 10 (exclusive) and a step size of 2
print(list(range(9,-1,-1))) # negative steps also possible
print(list(range(0,10,-1))) # be careful not to mix up start and stop, this could lead to empty ranges
# Durch ```range``` ist es sehr einfach mit Python eine C-Style ```for``` Schleife umzusetzen.
list_one = [1, 2, 4, 8, 16]
list_two = [1, 3, 9, 27, 81]
# C-style loop over the index, so both lists can be addressed in lockstep
for i in range(len(list_one)):
    print(list_one[i]+list_two[i])
# ## break and continue
#
# Es gibt Situationen, in denen man einen Schleifendurchlauf vorzeitig beenden möchte. Hierzu dienen die Schlüsselwörter ```break``` und ```continue```. Bei einem ```break``` verlässt man den Block der Schleife komplett. Ein typischer Anwendungsfall von ```break``` ist das Suchen nach dem ersten Auftauchen eines bestimmten Ereignisses.
for char in "aldkfjsdlfkasldkjsadJlaskdKLjasd":
    if char.isupper():
        print(f"found first upper case character: {char}")
        break # stop loop, we only want the first occurrence
# Bei einem ```continue``` wird der aktuelle Schleifendurchlauf vorzeitig beendet und der nächste Durchlauf beginnt. ```continue``` wird typischerweise aus Effizienzgründen eingesetzt um den Rest vom Schleifendurchlauf zu überspringen. Ein weiterer Grund für den Einsatz von ```continue``` ist die Reduktion der Blocktiefe. Im folgenden Beispiel wird durch das ```continue``` kein ```else``` benötigt, wodurch die Blocktiefe der zweiten ```print``` Anweisung reduziert ist.
for value in range(1,10):
    if value%2==0:
        print(f"even number: {value}")
        # skip the rest of the body and start the next iteration
        continue
    print(f"odd number: {value}")
# Bitte beachten sie, dass es sich bei ```break``` und ```continue``` um *unconditional jumps* handelt, also Sprünge innerhalb des Kontrollflusses, die nicht direkt an eine Bedindung geknüpft sind. Derartige Sprünge sollten nach Möglichkeit vermieden werden. Man sollte zum Beispiel ```continue``` nur zur Reduzierung der Blocktiefe benutzen, wenn besonders viele Anweisungen betroffen sind und nicht für einzelne Anweisungen wie im obigen Beispiel.
# ## enumerate
#
# ```enumerate``` ist eine Hilfsfunktion, mit der man über Elemente einer Sequenz mit einer ```for``` Schleife iterieren kann, so dass man auch den Index des aktuellen Elements kennt. Der Rückgabewert von ```enumerate``` ist ein Iterator über Tupel, wobei die Tupel aus Paaren vom Index und Wert von Elementen einer Sequenz bestehen.
my_list = [1,2,3,4]
# enumerate yields (index, value) tuples
for enumerated_item in enumerate(my_list):
    print(enumerated_item)
# Die Tupel, die bei ```enumerate``` entstehen, werden üblicherweise direkt mit Hilfe von unboxing zwei Variablen zugewiesen, dem Index und dem Element der Sequenz.
my_list = [1,2,3,4]
for i,item in enumerate(my_list):
    # fixed stray ")" that was printed at the end of every line
    print(f"item at index {i}: {item}")
| script/Kapitel 3 - Kontrollfluss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.0 64-bit
# metadata:
# interpreter:
# hash: cf85b414d3663472de89104473c842eaab37d7b845999caf56a47ccda76ea2f8
# name: python3
# ---
# A partir del archivo food_100, se pide:
#
# 1. Limpiar el archivo borrando las columnas Unnamed.
# 2. ¿Qué porcentaje de valores NaN hay en cada columna?
# 3. ¿Tendría algún sentido clasificar el nombre de los alimentos a partir del top5 de columnas numéricas con menos valores NaN?
# +
import pandas as pd

# NOTE(review): assumes the notebook runs from the folder containing data/
df = pd.read_csv("data/food_100.csv")
df
# -
# ### 1. Borrar columnas Unnamed
df.shape
# +
# Drop every "Unnamed" column. Bug fix: the original always dropped the
# hard-coded 'Unnamed: 0' instead of the matched column `i` — it would raise
# KeyError on a second match and leave other Unnamed columns in place.
columns_l = list(df.columns)
for i in columns_l:
    if 'Unnamed' in i:
        print('Dropped: ', i)
        df = df.drop(columns=i)
df.shape
# -
df.info()
# ### 2. Porcentaje de NaNs en columnas
# percentage of missing values per column, sorted ascending
missing_prcnt = df.isnull().sum() * 100 / len(df)
missing_prcnt = missing_prcnt.sort_values()
missing_prcnt
missing_prcnt.hist()
# ### 3. ¿Tendría algún sentido clasificar el nombre de los alimentos a partir del top5 de columnas numéricas con menos valores NaN?
# Find the column that holds the food names
for i in columns_l:
    if 'name' in i:
        print(i)
# +
# DataFrame with the top-5 numeric columns having the fewest NaNs plus the food name
# NOTE(review): relies on missing_prcnt being sorted ascending so iloc[:,:5]
# picks the least-sparse numeric columns — confirm this still holds after the
# dtype filtering.
study_df = df[missing_prcnt.index.values].select_dtypes(include=['float64', 'int64']).iloc[:,:5].join(df['product_name'])
study_df
# -
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.model_selection import train_test_split
# Build a classification model to check whether the selected columns are
# useful for classifying the foods.
df_n = study_df.dropna()
X = df_n.iloc[:,: 5].values
# Keep y one-dimensional: sklearn estimators expect shape (n_samples,) and
# emit DataConversionWarning for the old column-vector reshape(-1, 1).
y = df_n['product_name'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)
from sklearn.svm import SVC
# +
# SVM classifier with an RBF kernel.
# Renamed the local variable: `svr_poly` was misleading — this is an SVC
# (classifier, not SVR) and the kernel is 'rbf', not 'poly'.
svc_rbf = SVC(C=1000, kernel='rbf', gamma=10)
svc_rbf.fit(X_train, y_train)
print('Train score =', svc_rbf.score(X_train, y_train))
print('Test score =', svc_rbf.score(X_test, y_test))
# +
# Logistic regression (the original comment said "Logarithmic")
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
print('Training data accuracy -->', log_reg.score(X_train, y_train))
print('Test data accuracy -->', log_reg.score(X_test, y_test))
# +
# K-nearest-neighbours classifier with k=7
knn_mod = KNeighborsClassifier(n_neighbors=7)
knn_mod.fit(X_train, y_train)
print('Training data accuracy -->', knn_mod.score(X_train, y_train))
print('Test data accuracy -->', knn_mod.score(X_test, y_test))
# -
# Según los diferentes modelos de clasificación utilizados, parece que la selección de columnas realizada para clasificar los alimentos no es buena.
| 2_Ejercicios/Modulo2/5.DeciTrees_Kgboost_gridserarch/exercises/2.analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="A1SouZXWbOPb"
# INSTALL REQUIREMENTS
# !pip install transformers==3.5.1
# !pip install pyconll
# !pip install pkbar
# !pip install torch==1.4.0
# + id="w9s_coERT5vZ"
# IMPORTS
from io import open
import pathlib
import pandas as pd
import numpy as np
import re
import os
import random
import time
import pyconll
import pkbar
import torch
from torch import nn
from torch.nn.functional import softmax
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
# %load_ext tensorboard
import transformers
from transformers import (
BertForTokenClassification,
AdamW,
BertModel,
BertConfig,
AlbertForTokenClassification,
)
from transformers import AutoTokenizer, AutoModel, AutoConfig
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import matplotlib.pyplot as plt
# %matplotlib inline
# SEEDING
# Fix every RNG source (python, numpy, torch CPU/GPU) and force deterministic
# cuDNN kernels so runs are reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# NOTE(review): setting PYTHONHASHSEED at runtime only affects subprocesses,
# not this interpreter's already-initialized hash randomization.
os.environ["PYTHONHASHSEED"] = str(seed_val)
# DEVICE
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print("using ", device)
# + [markdown] id="jDY6mHkYASLb"
# ### HYPERPARAMS
# + id="AOXZYKBRAU9p"
# MODEL HYPERPARAMS
LAYER_NO = 11  # transformer layer whose hidden states feed the linear probe
DROPOUT = 0.1
MODEL_NAME = "ai4bharat/indic-bert"
# OPTIMIZATION HYPERPARAMS
LEARNING_RATE = 1e-2
BATCH_SIZE = 128
EPOCHS = 30
PATIENCE = 2  # early-stopping patience (epochs without validation improvement)
WEIGHT_DECAY = 1e-6
# + id="tBjxFY8B6wSA"
# PATH
TRAIN_PATH = "train.conllu"
TEST_PATH = "test.conllu"
DEV_PATH = "dev.conllu"
# Bug fix: SAVE_PATH was a plain str, so every later `SAVE_PATH / MODEL_NAME`
# raised TypeError, and os.mkdir cannot create the nested
# "checkpoints/ai4bharat/indic-bert" directory. Use pathlib and create all
# missing parents in one idempotent call.
SAVE_PATH = pathlib.Path("checkpoints")
(SAVE_PATH / MODEL_NAME).mkdir(parents=True, exist_ok=True)
experiment_id = "{}{}{}".format("pos", "indic-bert", LAYER_NO)
# + id="XuQXRNEFTOjj"
# GENERATE LISTS OF TEXT TOKENS AND CORRESPONDING POS TAGS
def generate_data(dataset_path):
    """
    takes a conll file and returns token list and pos tag list
    """
    token_text, token_postag = [], []
    for sentence in pyconll.load_from_file(dataset_path):
        token_text.append([token.form for token in sentence])
        token_postag.append([token.upos for token in sentence])
    return token_text, token_postag
# + id="1T2XmnS0z7ap"
# EXTRACT TOKENS AND POS TAGS
train_texts, train_tags = generate_data(TRAIN_PATH)
val_texts, val_tags = generate_data(DEV_PATH)
test_texts, test_tags = generate_data(TEST_PATH)
# + id="CExfZXpVwEiO"
# CALCULATE LENGTH OF TRAIN, VAL AND TEST DATASETS
train_len = len(train_tags)
val_len = len(val_tags)
test_len = len(test_tags)
# + id="E61G7unu0xgJ"
# FIND NUMBER OF UNIQUE TAGS
# pool tags from all splits so train/val/test share one label space
tags_total = train_tags + val_tags + test_tags
# ENCODE TAGS TO INTEGERS AND VICE-VERSA
# NOTE(review): iteration order of a set is not stable across interpreter
# runs (PYTHONHASHSEED is set too late above), so tag2id may differ between
# runs — sorting unique_tags would make the ids reproducible.
unique_tags = set(tag for doc in tags_total for tag in doc)
tag2id = {tag: id for id, tag in enumerate(unique_tags)}
id2tag = {id: tag for tag, id in tag2id.items()}
# + id="lGeOS944nc_X"
# LINEAR PROBING MODEL
class BERTTokenClassifierProbe(AlbertForTokenClassification):
    """Linear probe for token classification on top of a frozen encoder.

    The pretrained transformer is frozen; only the dropout + linear head is
    trained, using the hidden states of the layer selected by ``LAYER_ID``.
    """

    def __init__(self, config, LAYER_ID=12, DROPOUT=0.0):
        super().__init__(config)
        # layer to extract output from, can be any integer in range [1,12]
        self.layer_id = LAYER_ID
        self.num_labels = config.num_labels
        # load the pretrained encoder and freeze every parameter
        self.bert = AutoModel.from_pretrained(MODEL_NAME, config=config)
        for parameter in self.bert.parameters():
            parameter.requires_grad = False
        # linear probe: dropout + per-token classification head
        self.dropout = nn.Dropout(DROPOUT)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # taking output from the specified layer (outputs[2] holds the hidden
        # states because the config sets output_hidden_states=True)
        sequence_output = outputs[2][self.layer_id]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            # Only keep active (non-padding) positions in the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss,
                    labels.view(-1),
                    torch.tensor(loss_fct.ignore_index).type_as(labels),
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        # Bug fix: TokenClassifierOutput was never imported at module level,
        # so the return_dict=True path raised NameError. Import it locally to
        # keep the module's import block untouched.
        from transformers.modeling_outputs import TokenClassifierOutput
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# + id="wS6_x5TMNfLL"
# TOKENIZER
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
# PRE-TRAINED MODEL and PROBE
# output_hidden_states=True so forward() can read an intermediate layer
config = AutoConfig.from_pretrained(
    MODEL_NAME, output_hidden_states=True, num_labels=len(unique_tags)
)
model = BERTTokenClassifierProbe(config, LAYER_ID=LAYER_NO, DROPOUT=DROPOUT).to(device)
# + id="J_Zsr9IDGzyJ"
# TOKENIZE DATA
# adding max_length = 128 for Indic-Bert(wasn't reqd in mBERT)
def _tokenize_split(texts):
    """Tokenize one split with the settings shared by train/val/test.

    Factored out to remove the triplicated keyword-argument list; behavior is
    unchanged. offset_mapping is needed later to align tags with subwords.
    """
    return tokenizer(
        texts,
        is_split_into_words=True,
        return_offsets_mapping=True,
        padding=True,
        truncation=True,
        max_length=128,
    )

train_encodings = _tokenize_split(train_texts)
val_encodings = _tokenize_split(val_texts)
test_encodings = _tokenize_split(test_texts)
# + id="w3UHtFJxv9YY"
# CONVERT THE TOKENS TO THEIR RESPECTIVE INTEGER IDS
def convert_token_to_id(encodings, length):
    """Collect the subword token strings of the first `length` encodings."""
    return [encodings[idx].tokens for idx in range(length)]
# per-sentence subword token strings; used by encode_tags to skip the
# tokenizer's extra "_" tokens
train_token_ids = convert_token_to_id(train_encodings, train_len)
val_token_ids = convert_token_to_id(val_encodings, val_len)
test_token_ids = convert_token_to_id(test_encodings, test_len)
# + id="CxQsPKTYKK8R"
# ENCODE THE TAGS IN A SPECIFIC MANNER
def encode_tags(tags, encodings, tokenizer, token_id):
    """
    Align per-word POS tags with the subword tokenization.

    tags      : per-sentence lists of POS tag strings
    encodings : tokenizer output including offset_mapping
    tokenizer : tokenizer used to look up subword ids
    token_id  : per-sentence lists of subword token strings

    Returns (encoded_labels, labels_index): encoded_labels carries the tag id
    on each word's first subword and -100 (ignored by CrossEntropyLoss)
    everywhere else; labels_index lists the positions that carry a label.
    """
    labels = [
        [tag2id[tag] for tag in doc] for doc in tags
    ]  # convert pos tags to their integer ID
    encoded_labels = []
    labels_index = []
    for doc_labels, doc_offset, doc_token_id in zip(
        labels, encodings.offset_mapping, token_id
    ):
        # create an empty array of -100
        doc_enc_labels = (
            np.ones(len(doc_offset), dtype=int) * -100
        )  # array of length=number of word piece tokens in sentence
        arr_offset = np.array(doc_offset)  # convert doc_offset to numpy array
        # set labels whose first offset position is 0 (all other sub-words other than initial one) and the second is not 0 (CLS and PAD token)
        a = (arr_offset[:, 0] == 0) & (
            arr_offset[:, 1] != 0
        )  # true at required labels and false were the labels need to be ignored
        # print(doc_labels)
        # print(doc_offset)
        li = []
        countl = 0  # count of doc_labels (original labels)
        countel = 0  # count of doc_enc_labels (encoded labels)
        for j in a:  # iterate through every element of 'a'
            if j:
                # token id 8 is presumably the bare '▁' piece in the
                # indic-bert vocab — confirm against the tokenizer's vocab
                if (
                    tokenizer.convert_tokens_to_ids(doc_token_id[countel]) != 8
                ):  # ignore the extra '_' token generated due to indicbert tokenizer
                    li.append(countel)
                    doc_enc_labels[countel] = doc_labels[
                        countl
                    ]  # if true append the label from doc_labels to the new required encoded labels list
                    countl = countl + 1
                countel = countel + 1
            else:  # if false skip that label from doc_label and move to the next index at doc_enc_labels
                countel = countel + 1
        encoded_labels.append(doc_enc_labels.tolist())
        labels_index.append(li)
    return encoded_labels, labels_index


# build aligned label arrays for all three splits
train_labels, train_labels_index = encode_tags(
    train_tags, train_encodings, tokenizer, train_token_ids
)
val_labels, val_labels_index = encode_tags(
    val_tags, val_encodings, tokenizer, val_token_ids
)
test_labels, test_labels_index = encode_tags(
    test_tags, test_encodings, tokenizer, test_token_ids
)
# + id="g3PgrqeOj7nX"
# DATASETS
class POSdataset(Dataset):
    """Wraps tokenizer encodings and aligned label lists as a torch Dataset."""

    def __init__(self, encodings, labels):
        self._enc = encodings
        self._labels = labels

    def __getitem__(self, idx):
        sample = {key: torch.tensor(values[idx]) for key, values in self._enc.items()}
        sample["labels"] = torch.tensor(self._labels[idx])
        return sample

    def __len__(self):
        return len(self._labels)
train_dataset = POSdataset(train_encodings, train_labels)
val_dataset = POSdataset(val_encodings, val_labels)
test_dataset = POSdataset(test_encodings, test_labels)
# + id="1bWAinOtz8KK"
# DATALOADERS
# only the training loader shuffles; evaluation order must stay fixed
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
# AdamW over all parameters; the frozen encoder's params have requires_grad=False
optim = AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
# + id="NbRl0CcbhAl4"
# TENSORBOARD LOGGER
# logs land next to the checkpoints; `comment` tags the run with the experiment id
writer = SummaryWriter(log_dir=str(SAVE_PATH / MODEL_NAME), comment=experiment_id)
# + colab={"background_save": true} id="Z_3gdSZMtiLY"
# TRAIN PROBE
min_val_loss = 999999  # NOTE(review): float('inf') would be the idiomatic sentinel
checkpoint_epoch = 0
COUNTER = 0  # epochs since the last validation improvement (early stopping)
for epoch in range(EPOCHS):
    # progress bar
    print("\nTraining Epoch: %d/%d" % (epoch + 1, EPOCHS))
    kbar = pkbar.Kbar(target=len(train_loader), width=100)
    model.train()
    epoch_loss = 0.0
    for batch_id, batch_data in enumerate(train_loader):
        # forward pass
        optim.zero_grad()
        input_ids = batch_data["input_ids"].to(device)
        attention_mask = batch_data["attention_mask"].to(device)
        # token_type_ids = batch['token_type_ids'].to(device)
        labels = batch_data["labels"].to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        # backward pass
        loss = outputs[0]
        loss.backward()
        optim.step()
        # progress & logging
        epoch_loss += loss.item()
        kbar.update(batch_id, values=[("Training Loss", loss.item())])
        writer.add_scalar(
            "Running Loss", loss.item(), epoch * len(train_loader) + batch_id
        )
    # log epoch training loss
    # NOTE(review): epoch_loss sums per-batch *mean* losses, so dividing by
    # len(train_dataset) underestimates the mean loss — confirm intended.
    writer.add_scalar("Epoch Train Loss", epoch_loss / len(train_dataset), epoch + 1)
    # validation
    val_loss = 0.0
    model.eval()
    # progress bar
    print("\nValidating Epoch: %d/%d" % (epoch + 1, EPOCHS))
    kbar = pkbar.Kbar(target=len(val_loader), width=64)
    with torch.no_grad():
        for batch_id, batch_data in enumerate(val_loader):
            input_ids = batch_data["input_ids"].to(device)
            attention_mask = batch_data["attention_mask"].to(device)
            # token_type_ids = batch['token_type_ids'].to(device)
            labels = batch_data["labels"].to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs[0]
            # progress & logging
            val_loss += loss.item()
            kbar.update(batch_id, values=[("Validation Loss", loss.item())])
    # log epoch validation loss
    writer.add_scalar("Epoch Valid Loss", val_loss / len(val_dataset), epoch + 1)
    if (val_loss / len(val_dataset)) < min_val_loss:
        print("\nModel Optimized! Saving Weights...", "\n")
        min_val_loss = val_loss / len(val_dataset)
        checkpoint_epoch = epoch + 1
        # only the probe head is checkpointed; the encoder stays frozen
        torch.save(
            model.classifier.state_dict(),
            SAVE_PATH / MODEL_NAME / (experiment_id + ".pt"),
        )
        COUNTER = 0
    else:
        COUNTER += 1
        if COUNTER >= PATIENCE:
            print("\nEarly stopping!\n")
            break
# + [markdown] id="Ck-8bbuwwKA7"
# # Prediction on Test
# + id="V4Y73rE2KlCf"
# PREDICTIONS ON TEST
preds = []
ground_truth = []
# restore the best probe head found during training
model.classifier.load_state_dict(
    torch.load(SAVE_PATH / MODEL_NAME / (experiment_id + ".pt"))
)
model.eval()
with torch.no_grad():
    for batch_id, batch_data in enumerate(test_loader):
        labels_index = []
        input_ids = batch_data["input_ids"].to(device)
        attention_mask = batch_data["attention_mask"].to(device)
        # token_type_ids = batch['token_type_ids'].to(device)
        labels = batch_data["labels"].to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        logits = outputs[1]
        soft = softmax(logits, dim=2)
        # get predicted pos tag ids
        top_p, top_class = soft.topk(1, dim=2)
        # NOTE(review): squeeze() also drops the batch dimension if the final
        # batch contains exactly one sentence — squeeze(2) would be safer.
        tt = top_class.squeeze()
        # retrieve relevant indices i.e. first subwords only (label != -100)
        for a in range(len(labels)):
            temp = []
            for b in range(len(labels[a])):
                if labels[a][b] != -100:
                    temp.append(b)
            labels_index.append(temp)
        # store predictions of relevant indices
        for i in range(len(labels_index)):
            for j in labels_index[i]:
                preds.append(tt[i][j].item())
                ground_truth.append(labels[i][j].item())
# + id="T59_kfPRercv"
# USE PREDS LIST AND GROUND TRUTH LIST TO GET ACC & F1
acc = accuracy_score(ground_truth, preds)
# weighted averaging accounts for the imbalanced tag distribution
prec, recall, f1, _ = precision_recall_fscore_support(
    ground_truth, preds, average="weighted"
)
print("Accuracy: {}, F1: {}, Prec: {}, Recall: {}".format(acc, f1, prec, recall))
# + id="C-XFcMlexe8w"
# PRINT METRICS
# rounded to 4 decimals for reporting
print("accuracy", round(acc, 4))
print("F1", round(f1, 4))
print("Precision", round(prec, 4))
print("recall", round(recall, 4))
# + id="hmHuy6hwiKAN"
# CLOSING LOGS
# record the full hyper-parameter configuration alongside the final metrics
writer.add_hparams(
    {
        "model": MODEL_NAME,
        "layer": LAYER_NO,
        "bs": BATCH_SIZE,
        "n_epochs": EPOCHS,
        "checkpoint_epoch": checkpoint_epoch,
        "lr": LEARNING_RATE,
        "dropout": DROPOUT,
        "patience": PATIENCE,
        "weight_decay": WEIGHT_DECAY,
    },
    {
        "validation_loss": min_val_loss,
        "test_acc": acc,
        "test_f1": f1,
        "test_prec": prec,
        "test_recall": recall,
    },
)
writer.flush()
# + id="FBPscCz7BeTG"
# VISUALIZE LOGS
print(SAVE_PATH / MODEL_NAME)
# %tensorboard --logdir "checkpoints/ai4bharat/indic-bert"
| notebooks/PoS_Tagging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating code coverage reports for an nbdev project
#
# We can run tests in parallel and get coverage with [pytest-cov](https://github.com/pytest-dev/pytest-cov).
#
# If you'd like to try this:
# - you might need to use an [editable install of nbdev](https://github.com/fastai/nbdev/#installing)
# - install pytest-cov and its dependencies
# - copy [test_nbs.py](https://github.com/pete88b/decision_tree/blob/master/test_nbs.py) to your nbdev project
# - then run `pytest --cov=[your lib name]`
#
# Feel free to join [the discussion](https://forums.fast.ai/t/nbdev-code-coverage-n-tests/73993/6) (o:
# # Overview of this module
#
# **Note: This is probably not the best way to get coverage - but I'm leaving this content in case it's useful**
#
# **Note:** Until the next `nbdev` release you need to use an [editable install](https://github.com/fastai/nbdev/#installing), as this module uses new functions like `split_flags_and_code`.
#
# Feel free to use `pytest` etc but to follow these examples, you'll just need `coverage`.
#
# This notebook creates `testcoverage.py` which is not tied to the decision_tree project (so you can just download [testcoverage.py](https://github.com/pete88b/decision_tree/blob/master/decision_tree/testcoverage.py) if you like)
#
# Running `testcoverage.py` will:
# - create a new folder in your nbdev project `[lib_path]_test`
# - delete all test scripts in `[lib_path]_test`
# - write a test script to `[lib_path]_test` for each notebook in `[nbs_path]`
# - and a `run_all.py` to run all test scripts in one go
#
# To run create a test coverage report:
# - cd to `nbs_path` of the project you want to test
# - create test scripts with `python [full path ...]/testcoverage.py`
# - `coverage run --source=[lib_path] [lib_path]_test/run_all.py`
# - `coverage report`
#
# Creating a test coverage report for fastai2 in my env looks like:
# ```
# # cd /home/peter/github/pete88b/fastai2/nbs
#
# python /home/peter/github/pete88b/decision_tree/decision_tree/testcoverage.py
#
# coverage run --source=/home/peter/github/pete88b/fastai2/fastai2 /home/peter/github/pete88b/fastai2/fastai2_test/run_all.py
#
# coverage report
# ```
# *Note: this ↑ fails very quickly as fastai2 tests use things that are not available in plain python.*
#
# ## What next?
# - see if running tests in plain python is useful
# - it might be true that the tests of some/most projects don't need any ipython
# - make artifacts (like images/mnist3.png) available to the test scripts
# - so you don't have to be in the nbs folder to run tests
# - see if we can get coverage when running tests with ipython
# - this looks promising https://github.com/computationalmodelling/nbval
# - see if there is a nice way to separate plain python tests and ipython tests?
#
# ## Details details ...
#
# I chose to "import" the module being tested (rather than write all code cells to the test script) so that:
# - we are testing the library created by nbdev
# - because the things we deliver are .py files, I can't help thinking that these are what we should be testing
# - we could use the test scripts to test a pip installed version of the library
# - i.e. we are testing the result of the full build, package and delivery process
from nbdev import *
# %nbdev_default_export testcoverage
# %nbdev_export
from nbdev.export import *
from nbdev.imports import *
# %nbdev_export
def write_imports(test_file,exports):
    "write import statements to the test script for all modules exported to by the nb we're converting"
    # export is None if cell doesn't have an nbdev export flag (%nbdev_export, %nbdev_export_internal ...)
    # Set comprehension deduplicates module names; export[0] is the module a cell exports to.
    for export in {export[0] for export in exports if export}:
        export_parts=export.split('.')
        b=export_parts.pop()  # last dotted component: the module name itself
        export_parts.insert(0,Config().lib_name)  # qualify with the library package name
        a='.'.join(export_parts)  # fully-qualified package path, e.g. "libname.sub"
        # The generated code imports the module, then re-imports every attribute it
        # defines, so test cells run as if they lived inside that module.
        test_file.write(f"""
from {a} import {b}
for o in dir({b}):
    exec(f'from {a}.{b} import {{o}}')
""")
# the test script will import everything returned by `dir(module)` because we need the test code to run as if it's in the module we're testing
# %nbdev_export
def write_test_cell_callback(i,cell,export,code):
    "Return the code to be written to the test script or `None` to not write anything for `cell`"
    things_to_exclude = ['notebook2script','show_doc']
    if export: return None # if it's exported to the library, don't add to test script
    # Substring matching is crude (it also drops cells that merely mention the
    # name) but intentional. TODO: make this better
    if any(thing in code for thing in things_to_exclude): return None
    # Comment out ipython magics (%) and shell escapes (!) so the script runs in plain python.
    return re.sub(r'^\s*(%|!)', r'#\1', code, flags=re.MULTILINE)
# %nbdev_export
def write_test_cells(test_file,nb,exports):
    "Writes the source of code cells to the test script"
    # Blank-line separator between cells; configurable via the cell_spacing setting.
    spacing = '\n' * (int(Config().get('cell_spacing', '1')) + 1)
    for idx, (cell, export) in enumerate(zip(nb['cells'], exports)):
        if cell['cell_type'] != 'code':
            continue
        code_lines = split_flags_and_code(cell)[1]
        body = spacing + '\n'.join(code_lines)
        body = re.sub(r' +$', '', body, flags=re.MULTILINE)  # strip trailing spaces
        body = write_test_cell_callback(idx, cell, export, body)
        if body: test_file.write(body)
# %nbdev_export
def notebook2testscript():
    "Convert notebooks to test scripts"
    # Test scripts live in a sibling folder named "<lib_path>_test".
    test_path=Path(str(Config().lib_path)+'_test')
    test_path.mkdir(parents=True, exist_ok=True)
    # Start clean: remove previously generated scripts (run_all.py is rewritten below).
    for old_file in test_path.glob('test_*.py'): old_file.unlink()
    print('Removed all test_*.py files from',test_path)
    # Skip notebooks whose names start with '_' (drafts / non-exported notebooks).
    files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')]
    for nb_file in sorted(files):
        # "-" is not valid in a module name, so it becomes "_".
        test_file_name = test_path/f'test_{nb_file.stem.replace("-","_")}.py'
        print('Converting', nb_file.name, 'to\n ', test_file_name)
        # Path relative to the project root, forward slashes for portability.
        file_path = os.path.relpath(nb_file, Config().config_file.parent).replace('\\', '/')
        with open(test_file_name, 'w', encoding='utf8') as test_file:
            test_file.write(f"# AUTOGENERATED! DO NOT EDIT! File to edit: {file_path} (unless otherwise specified).\n")
            nb=read_nb(nb_file)
            default_export=find_default_export(nb['cells'])
            # exports[i] describes where (if anywhere) cell i is exported to.
            exports = [is_export(c, default_export) for c in nb['cells']]
            write_imports(test_file,exports)
            write_test_cells(test_file,nb,exports)
    print('Writing',test_path/'run_all.py')
    # run_all.py simply imports every generated test module in order.
    with open(test_path/'run_all.py', 'w', encoding='utf8') as run_all_file:
        for nb_file in sorted(files): run_all_file.write(f'import test_{nb_file.stem.replace("-","_")}\n')
# %nbdev_export
# Entry point: only convert when run as a plain script, never inside a notebook kernel.
if __name__ == "__main__" and not IN_NOTEBOOK:
    notebook2testscript()
# %nbdev_hide
# nbdev export step: rebuild the library modules from this notebook.
notebook2script()
# # PB notes
# - convert all notebooks that don't start with `_`
# - import default_export of notebook
# - import things exported to other modules
# - handle nbdev test flags - TODO
# - create `test_[notebook name].py`
# - write code of test cells to `test_[notebook name].py`
# - exclude show_doc, notebook2script, cmd calls etc TODO
# ```
# coverage run --source=/home/peter/github/pete88b/decision_tree/decision_tree /home/peter/github/pete88b/decision_tree/decision_tree_test/test_00_core.py
#
# coverage run --source=/home/peter/github/pete88b/decision_tree/decision_tree /home/peter/github/pete88b/decision_tree/decision_tree_test/run_all.py
# ```
# ```
# # cd /home/peter/github/pete88b/decision_tree
#
# python /home/peter/github/pete88b/decision_tree/decision_tree/testcoverage.py
#
# coverage run --source=/home/peter/github/pete88b/decision_tree/decision_tree /home/peter/github/pete88b/decision_tree/decision_tree_test/run_all.py
#
# coverage report
# ```
| 80_test_coverage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Extract an individual's sparse traces and synthesised traces for illustrative purpose
# + pycharm={"name": "#%%\n"}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + pycharm={"name": "#%%\n"}
import os
import subprocess
import sys
import yaml
import geopandas as gpd
def get_repo_root():
    """Return the absolute path of the repository's top-level directory.

    Asks git directly (`git rev-parse --show-toplevel`), so it works from any
    working directory inside the repo.

    Returns:
        str: absolute repo root path, without a trailing newline.

    Raises:
        subprocess.CalledProcessError: if git fails (e.g. not run inside a repo).
    """
    # NOTE: the original computed os.path.dirname(os.path.abspath('__file__'))
    # with '__file__' as a *string literal*, which simply resolves to the
    # current working directory — so use os.getcwd() explicitly.
    return subprocess.check_output('git rev-parse --show-toplevel'.split(),
                                   cwd=os.getcwd(),
                                   universal_newlines=True).rstrip()
sys.path.append(get_repo_root())
ROOT_dir = get_repo_root()
import lib.helpers as helpers
import lib.models as models
import lib.saopaulo as saopaulo
import lib.genericvalidation as genericvalidation
with open(ROOT_dir + '/lib/regions.yaml') as f:
region_manager = yaml.load(f, Loader=yaml.FullLoader)
# + pycharm={"name": "#%%\n"}
class RegionParaGenerate:
    """Loads a region's geotagged tweets, filters them to analysis-ready users,
    and synthesises mobility visits with the preferential-return model."""
    def __init__(self, region=None):
        # region: key into lib/regions.yaml (e.g. 'saopaulo'); required.
        if not region:
            raise Exception("A valid region must be specified!")
        self.region = region
        self.path2visits = ROOT_dir + f'/dbs/{region}/visits/'
        if not os.path.exists(self.path2visits):
            os.makedirs(self.path2visits)
        self.path2geotweets = ROOT_dir + f'/dbs/{region}/geotweets.csv'
        if not os.path.exists(self.path2geotweets):
            raise Exception("The geotweets of the input region do not exist.")
        self.geotweets = None   # filtered tweets; set by load_geotweets()
        self.visits = None      # synthesised visits; set by visits_gen()
        # Load region data
        self.region_info = region_manager[self.region]
        self.zones = None       # zone polygons; set by country_zones_boundary_load()
        self.boundary = None    # dissolved national boundary in EPSG:4326
    def country_zones_boundary_load(self):
        """Load the country's zone polygons and derive the outer boundary used
        to keep only domestic tweets."""
        # The boundary to use when removing users based on location.
        zones_loader = self.region_info['zones_loader']
        metric_epsg = self.region_info['country_metric_epsg']
        zone_id = self.region_info['country_zone_id']
        zones_path = self.region_info['country_zones_path']
        if zones_loader == 1:
            zones = gpd.read_file(ROOT_dir + zones_path)
            zones = zones.loc[zones[zone_id].notnull()]
            zones = zones.rename(columns={zone_id: "zone"})
            zones.zone = zones.zone.astype(int)
            self.zones = zones.loc[zones.geometry.notnull()].to_crs(metric_epsg)
            # Dissolve every zone into one polygon, simplify, convert back to lat/lon.
            self.boundary = self.zones.assign(a=1).dissolve(by='a').simplify(tolerance=0.2).to_crs("EPSG:4326")
    def load_geotweets(self, only_weekday=True, only_domestic=True):
        """Read the raw geotweets CSV and apply the filtering pipeline.

        only_weekday: drop weekend tweets.
        only_domestic: clip tweets to the convex hull of the country boundary
        (requires country_zones_boundary_load() to have been called first).
        """
        geotweets = helpers.read_geotweets_raw(self.path2geotweets)
        if only_weekday:
            # Only look at weekday trips
            geotweets = geotweets[(geotweets['weekday'] < 6) & (0 < geotweets['weekday'])]
        # Check if keeps only domestic geotagged tweets
        if only_domestic:
            geotweets = gpd.GeoDataFrame(
                geotweets,
                crs="EPSG:4326",
                geometry=gpd.points_from_xy(geotweets.longitude, geotweets.latitude)
            )
            geotweets = gpd.clip(geotweets, self.boundary.convex_hull)
            geotweets.drop(columns=['geometry'], inplace=True)
        geotweets = geotweets.set_index('userid')
        # Remove users who don't have home visit in geotweets
        home_visits = geotweets.query("label == 'home'").groupby('userid').size()
        geotweets = geotweets.loc[home_visits.index]
        # Remove users with less than 20 tweets
        tweetcount = geotweets.groupby('userid').size()
        geotweets = geotweets.drop(labels=tweetcount[tweetcount < 20].index) # This is for domestic trip generation
        # Remove users with only one region
        regioncount = geotweets.groupby(['userid', 'region']).size().groupby('userid').size()
        geotweets = geotweets.drop(labels=regioncount[regioncount < 2].index)
        # Ensure the tweets are sorted chronologically
        self.geotweets = geotweets.sort_values(by=['userid', 'createdat'])
    def visits_gen(self, p=None, gamma=None, beta=None, days=None):
        """Sample synthetic visits from the filtered geotweets.

        p, gamma: preferential-return exploration/return parameters;
        beta: region-transition Zipf parameter; days: number of days simulated.
        Returns (and stores in self.visits) the sampled visits.
        """
        visit_factory = models.Sampler(
            model=models.PreferentialReturn(
                p=p,
                gamma=gamma,
                region_sampling=models.RegionTransitionZipf(beta=beta, zipfs=1.2)
            ),
            n_days=days,
            # Trips per day drawn from N(3.14, 1.8) — presumably calibrated; confirm source.
            daily_trips_sampling=models.NormalDistribution(mean=3.14, std=1.8)
        )
        # Calculate visits
        self.visits = visit_factory.sample(self.geotweets)
        return self.visits
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 1. Generate visits for Sao Paulo
# + pycharm={"name": "#%%\n"}
# Calibrated model parameters for Sao Paulo and simulation length in days.
p, gamma, beta = 0.98, 0.18, 0.16
days = 260
region2compute = 'saopaulo'
# prepare region data by initiating the class
print(f'{region2compute} started...')
g = RegionParaGenerate(region=region2compute)
print('Loading zones to get boundary...')
g.country_zones_boundary_load()
print('Loading geotagged tweets...')
g.load_geotweets()
print('Generating visits...')
g.visits_gen(p=p, gamma=gamma, beta=beta, days=days)
# + pycharm={"name": "#%%\n"}
# Persist the filtered tweets and synthesised visits for later reuse.
g.geotweets.to_csv(f'../../dbs/{region2compute}/geotweets_dom_cali.csv')
g.visits.to_csv(f'../../dbs/{region2compute}/visits_dom_cali.csv')
# -
# ## 2. Select an individual
# + pycharm={"name": "#%%\n"}
df_users = g.geotweets.groupby('userid').size()
df_users.head()
# + pycharm={"name": "#%%\n"}
# ID= 46292618
eg_id = 46292618
# Individual sparse geotagged tweets
df_tw = g.geotweets.loc[g.geotweets.index == eg_id, :]
# NOTE(review): crs={'init': ...} and sjoin's op= are deprecated geopandas
# syntax (use crs="EPSG:4326" / predicate=) — confirm pinned geopandas version.
gdf_tw = gpd.GeoDataFrame(
    df_tw,
    crs={'init': 'epsg:4326'},
    geometry=gpd.points_from_xy(df_tw['longitude'],
                                df_tw['latitude']))
# Get spatial zones
gs = saopaulo.GroundTruthLoader()
gs.load_zones()
zones = gs.zones
# Filter out the tweets outside the study area for the sake of visualisation
gdf_tw = gpd.sjoin(gdf_tw, zones.to_crs(4326), op='intersects')
df_tw = gdf_tw.drop(columns=['geometry'])
# Build the individual's origin-destination matrix from synthesised visits.
df_visits = g.visits.loc[g.visits.index == eg_id, :]
od = genericvalidation.visits_to_odm(df_visits, zones)
od = od.reset_index()
od.columns = ['ozone', 'dzone', 'user_' + str(eg_id)]
# + pycharm={"name": "#%%\n"}
# Benchmark model odm generation
df_tw = df_tw.loc[:, ['region', 'createdat', 'latitude', 'longitude', 'label']]
df_tw.loc[:, 'kind'] = 'region'
od_b = genericvalidation.visits_to_odm(df_tw, zones, timethreshold_hours=24)
od_b = od_b.reset_index()
od_b.columns = ['ozone', 'dzone', 'user_' + str(eg_id)]
# + pycharm={"name": "#%%\n"}
# Export the illustrative example: benchmark ODM, model ODM, and raw tweets.
od_b.to_csv(ROOT_dir + '/results/input-output-example/odm_benchmark.csv', index=False)
od.to_csv(ROOT_dir + '/results/input-output-example/odm.csv', index=False)
df_tw.to_csv(ROOT_dir + '/results/input-output-example/tweets.csv', index=False)
| src/py/6-model-input-output-illustration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
# save numpy array as csv file
from numpy import asarray
from numpy import savetxt
# -
def createInputData():
    """Build a (22012, 31) float array of training samples in [-12, 12].

    Row layout: 6 fixed "corner" patterns, 1000 random +/-12 rows, 1000 random
    {-12, 0, 12} rows, 18000 uniform rows, another 1000 + 1000 random blocks,
    then the same 6 fixed patterns again.

    Returns:
        numpy.ndarray of shape (22012, 31), dtype float64.
    """
    n_cols = 31
    data = np.random.uniform(-12, 12, size=(18000, n_cols))
    # Rows whose entries are exactly +12 or -12 (randint gives 0/1 -> -1/+1 -> +/-12).
    dataInt1 = ((2*np.random.randint(0, 2, size=(1000, n_cols))-1))*12
    dataInt2 = ((2*np.random.randint(0, 2, size=(1000, n_cols))-1))*12
    # Rows whose entries are -12, 0 or +12.
    dataInt3 = np.random.randint(-1, 2, (1000, n_cols))*12
    dataInt4 = np.random.randint(-1, 2, (1000, n_cols))*12
    # Six deterministic corner rows, built programmatically instead of hand-typed:
    a1 = np.full(n_cols, 12)                             # all +12
    a2 = np.full(n_cols, -12)                            # all -12
    a3 = np.resize([12, -12], n_cols)                    # alternating, starts +12
    a4 = np.resize([-12, 12], n_cols)                    # alternating, starts -12
    a5 = np.resize([12, -12, -12, -12, 12, 12], n_cols)  # period-6 block pattern
    a6 = -a5                                             # its elementwise negation
    a = np.vstack([a1, a2, a3, a4, a5, a6])
    return np.r_[a, dataInt1, dataInt3, data, dataInt2, dataInt4, a]
# +
# Materialize the synthetic dataset as float64 before export.
data = asarray(createInputData(), dtype=np.float64,)
# Persist as CSV next to the notebook for downstream consumers.
savetxt('dataInputLarge2.csv', data, delimiter=',')
# -
| JupLab-Code/CreateData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pmd_beamphysics import ParticleGroup
from pmd_beamphysics.statistics import A_mat_calc, twiss_calc, normalized_particle_coordinate
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
matplotlib.rcParams['figure.figsize'] = (4,4)
# -
# # Simple Normalized Coordinates in ParticleGroup
#
# 1D normalized coordinates originate from the normal form decomposition, where the transfer matrix that propagates phase space coordinates $(x, p)$ is decomposed as
#
# $M = A \cdot R(\theta) \cdot A^{-1}$
#
# And the matrix $A$ can be parameterized as
#
# A = $\begin{pmatrix}\sqrt{\beta} & 0\\-\alpha/\sqrt{\beta} & 1/\sqrt{\beta}\end{pmatrix}$
#
# ?A_mat_calc
# +
# Make phase space circle. This will represent some normalized coordinates
theta = np.linspace(0, np.pi*2, 100)
zvec0 = np.array([np.cos(theta), np.sin(theta)])
plt.scatter(*zvec0)
# -
# Make a 'beam' in 'lab coordinates' by applying an arbitrary linear transform.
MYMAT = np.array([[10, 0],[-3, 5]])
zvec = np.matmul(MYMAT , zvec0)
plt.scatter(*zvec)
# With a beam, $\alpha$ and $\beta$ can be determined from moments of the covariance matrix.
# ?twiss_calc
# Calculate a sigma matrix, get the determinant
sigma_mat2 = np.cov(*zvec)
np.linalg.det(sigma_mat2)
# Get some twiss
twiss = twiss_calc(sigma_mat2)
twiss
# Analyzing matrices: A and its inverse from the measured beta/alpha.
A = A_mat_calc(twiss['beta'], twiss['alpha'])
A_inv = A_mat_calc(twiss['beta'], twiss['alpha'], inverse=True)
# A_inv turns this back into a circle.
zvec2 = np.matmul(A_inv, zvec)
plt.scatter(*zvec2)
# # Twiss parameters
#
# Effective Twiss parameters can be calculated from the second order moments of the particles.
#
# This does not change the phase space area
twiss_calc(np.cov(*zvec2))
# Reset plot
matplotlib.rcParams['figure.figsize'] = (13,8)
# # x_bar, px_bar, Jx, etc.
#
# These are essentially action-angle coordinates, calculated by using the an analyzing twiss dict
# ?normalized_particle_coordinate
# +
# Get some example particles
P = ParticleGroup('data/bmad_particles2.h5')
# This is a typical transverse phase space plot
P.plot('x', 'px')
# -
# If no twiss is given, then the analyzing matrix is computed from the beam itself.
normalized_particle_coordinate(P, 'x', twiss=None)
# This is equivalent (explicit twiss, manual mass normalization):
normalized_particle_coordinate(P, 'x', twiss=twiss_calc(P.cov('x', 'px')), mass_normalize=False)/np.sqrt(P.mass)
# And is given as a property:
P.x_bar
# The amplitude (action) is defined as:
(P.x_bar**2 + P.px_bar**2)/2
# This is also given as a property
P.Jx
# Note the mass normalization is the same
P.Jx.mean(), P['mean_Jx'], P['norm_emit_x']
# This is now nice and roundish
P.plot('x_bar', 'px_bar')
# Jy also works. This gives some sense of where the emittance is larger.
P.plot('t', 'Jy')
# Sort by Jx:
P = P[np.argsort(P.Jx)]
# Now particles are ordered
plt.plot(P.Jx)
# This can be used to calculate the 95% emittance (drop the 5% largest-action particles).
P[0:int(0.95*len(P))]['norm_emit_x']
# # Simple 'matching'
#
# Often a beam needs to be 'matched' for tracking in some program.
#
# This is a 'faked' transformation that ultimately would need to be realized by a focusing system.
def twiss_match(x, p, beta0=1, alpha0=0, beta1=1, alpha1=0):
    """
    Simple Twiss matching.

    Linearly transforms positions x and momenta p from initial Twiss
    parameters (beta0, alpha0) into final Twiss parameters (beta1, alpha1).

    This is simply the matrix transformation:

        xnew = ( sqrt(beta1/beta0)                         0              ) . ( x )
        pnew   ( (alpha0-alpha1)/sqrt(beta0*beta1)   sqrt(beta0/beta1)    )   ( p )

    Returns new x, p
    """
    scale = np.sqrt(beta1/beta0)
    coupling = (alpha0 - alpha1)/np.sqrt(beta0*beta1)
    return x*scale, x*coupling + p/scale
# +
# Get some Twiss
T0 = twiss_calc(P.cov('x', 'xp'))
T0
# -
# Make a copy, and manipulate it: match x/px to beta=9, alpha=-2.
P2 = P.copy()
P2.x, P2.px = twiss_match(P.x, P.px/P['mean_p'], beta0=T0['beta'], alpha0=T0['alpha'], beta1=9, alpha1=-2)
P2.px *= P['mean_p']
twiss_calc(P2.cov('x', 'xp'))
# +
# Make a dedicated routine
def matched_particles(particle_group, beta=None, alpha=None, plane='x', p0c=None, inplace=False):
    """
    Performs simple Twiss 'matching' by applying a linear transformation to
        x, px if plane == 'x', or y, py if plane == 'y'

    Parameters:
        particle_group: ParticleGroup to transform.
        beta, alpha: target Twiss parameters; None keeps the current value.
        plane: 'x' or 'y'.
        p0c: reference momentum for normalizing px/py; defaults to mean_p.
        inplace: if True, a copy will not be made, and changes will be done in place.

    Returns a new ParticleGroup (or particle_group itself when inplace).
    """
    assert plane in ('x', 'y'), f'Invalid plane: {plane}'
    if inplace:
        P = particle_group
    else:
        P = particle_group.copy()
    if not p0c:
        p0c = P['mean_p']
    # Use Bmad-style coordinates.
    # Get plane.
    if plane == 'x':
        x = P.x
        p = P.px/p0c
    else:
        x = P.y
        p = P.py/p0c
    # Get current Twiss (weighted covariance of the chosen plane)
    tx = twiss_calc(np.cov(x, p, aweights=P.weight))
    # If not specified, just fill in the current value.
    if alpha is None:
        alpha = tx['alpha']
    if beta is None:
        beta = tx['beta']
    # New coordinates via the twiss_match linear transformation
    xnew, pnew = twiss_match(x, p, beta0=tx['beta'], alpha0=tx['alpha'], beta1=beta, alpha1=alpha)
    # Set, de-normalizing momenta back with p0c
    if plane == 'x':
        P.x = xnew
        P.px = pnew*p0c
    else:
        P.y = xnew
        P.py = pnew*p0c
    return P
# Check: match the y-plane to alpha=-4 (beta unchanged) and compare Twiss.
P3 = matched_particles(P, beta=None, alpha=-4, plane='y')
P.twiss(plane='y'), P3.twiss(plane='y')
# -
# These functions are in statistics (library versions shadow the local ones above).
from pmd_beamphysics.statistics import twiss_match, matched_particles
| examples/normalized_coordinates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Data Source - Toxic Comment Classification Challenge (Kaggle)
# +
import pandas as pd, numpy as np
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_extraction.text import TfidfVectorizer
import config as cfg
# + pycharm={"name": "#%%\n"}
# Load training data and keep only comments with at least one toxicity label.
train = pd.read_csv(cfg.RESOURCE.toxic_comment_classification_train)
train.head()
# + pycharm={"name": "#%%\n"}
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train_toxic = train[train[label_cols].max(axis=1) >= 1].copy()
train_toxic['comment_text'].fillna("unknown", inplace=True)
train_toxic.info()
# + pycharm={"name": "#%%\n"}
# Test comments and their labels ship separately; join them on id.
test = pd.read_csv(cfg.RESOURCE.toxic_comment_classification_test)
test_labels = pd.read_csv(cfg.RESOURCE.toxic_comment_classification_test_labels)
test = pd.merge(test, test_labels, on='id', how='inner')
# + pycharm={"name": "#%%\n"}
test_toxic = test[test[label_cols].max(axis=1) >= 1].copy()
test_toxic['comment_text'].fillna("unknown", inplace=True)
test_toxic.info()
# + pycharm={"name": "#%%\n"}
# Pool train + test toxic comments for corpus-wide feature mining.
all_doc = pd.concat([train_toxic, test_toxic])
all_doc.info()
all_doc.head()
# + pycharm={"name": "#%%\n"}
import re, string
# Pad every punctuation/special character with spaces, then whitespace-split.
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
def tokenize(s):
    """Split `s` into word and punctuation tokens."""
    spaced = re_tok.sub(r' \1 ', s)
    return spaced.split()
n = all_doc.shape[0]
# Bigram TF-IDF over the pooled toxic comments, capped at 10k features.
# NOTE(review): tokenizer=None means the custom `tokenize` above is unused
# (the default sklearn tokenizer applies) — confirm that is intended.
vec = TfidfVectorizer(ngram_range=(2,2), tokenizer=None, max_features=10000,
                      min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
                      smooth_idf=1, sublinear_tf=1, stop_words='english')
all_tf_idf = vec.fit_transform(all_doc['comment_text'])
# + pycharm={"name": "#%%\n"}
# Per-label target vectors (0/1 arrays aligned with the TF-IDF rows).
all_toxic = all_doc['toxic'].values
all_severe_toxic = all_doc['severe_toxic'].values
all_obscene = all_doc['obscene'].values
all_threat = all_doc['threat'].values
all_insult = all_doc['insult'].values
all_identity_hate = all_doc['identity_hate'].values
# + pycharm={"name": "#%%\n"}
# Mutual information between each bigram feature and each toxicity label.
mi_toxic = mutual_info_classif(all_tf_idf, all_toxic, n_neighbors=3, random_state=2020)
mi_severe_toxic = mutual_info_classif(all_tf_idf, all_severe_toxic, n_neighbors=3, random_state=2020)
mi_obscene = mutual_info_classif(all_tf_idf, all_obscene, n_neighbors=3, random_state=2020)
mi_threat = mutual_info_classif(all_tf_idf, all_threat, n_neighbors=3, random_state=2020)
mi_insult = mutual_info_classif(all_tf_idf, all_insult, n_neighbors=3, random_state=2020)
mi_identity_hate = mutual_info_classif(all_tf_idf, all_identity_hate, n_neighbors=3, random_state=2020)
# + pycharm={"name": "#%%\n"}
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2
# (use get_feature_names_out()) — confirm the pinned sklearn version.
features = np.array(vec.get_feature_names())
# -
# ## (1/6) Mutual information score ranking for "Toxic" (top 100)
# + pycharm={"name": "#%%\n"}
# argsort ascending, reversed -> indices of the 100 highest-MI bigrams.
top_mi_toxic = np.argsort(mi_toxic)[::-1][:100]
features[top_mi_toxic]
# -
# ## (2/6) Mutual information score ranking for "Severe Toxic" (top 100)
# + pycharm={"name": "#%%\n"}
top_mi_severe_toxic = np.argsort(mi_severe_toxic)[::-1][:100]
features[top_mi_severe_toxic]
# -
# ## (3/6) Mutual information score ranking for "Obscene" (top 100)
# + pycharm={"name": "#%%\n"}
top_mi_obscene = np.argsort(mi_obscene)[::-1][:100]
features[top_mi_obscene]
# -
# ## (4/6) Mutual information score ranking for "Threat" (top 100)
# + pycharm={"name": "#%%\n"}
top_mi_threat = np.argsort(mi_threat)[::-1][:100]
features[top_mi_threat]
# -
# ## (5/6) Mutual information score ranking for "Insult" (top 100)
# + pycharm={"name": "#%%\n"}
top_mi_insult = np.argsort(mi_insult)[::-1][:100]
features[top_mi_insult]
# -
# ## (6/6) Mutual information score ranking for "Identity Hate" (top 100)
# + pycharm={"name": "#%%\n"}
top_mi_identity_hate = np.argsort(mi_identity_hate)[::-1][:100]
features[top_mi_identity_hate]
| toxic_language_mining/toxic_comment_classification_challenge/mining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')
# -
# ### Load the dataset
#
# - Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
# Code starts here
# Load training data and take a first look at shape, missing values, and class balance.
df=pd.read_csv("train.csv")
df.head()
# Code ends here.
df.shape
df.isnull().sum()
df['insuranceclaim'].value_counts()
df['insuranceclaim'].value_counts()/len(df)
df.info()
# Id carries no signal; drop it before modeling.
df.drop('Id',axis=1,inplace=True)
# ### EDA & Data Preprocessing
#
# - Check for the categorical & continuous features.
# - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.
# Code starts here
# Separate categorical and continuous views of the data for EDA.
categorical=df[["sex","children","smoker","region","insuranceclaim"]]
continuous=df[["age","bmi","charges",]]
# +
# to visualize distributions of the column(s) by plotting them.
import matplotlib.pyplot as plt
def plot_distribution(kind_,df,col):
    """Plot the distribution of one or more columns.

    Draws one chart per column, using the pandas plot kind given by kind_.
    You can pass multiple columns to this function.

    Keyword arguments:
    kind_ -- 'kind' of chart that will be plotted
    df -- pandas dataframe which has the data
    col -- list of all the features for which we want to plot the distribution
    """
    for feature in col:
        df[feature].plot(kind=kind_)
        plt.xlabel(feature)
        plt.show()
# Histograms and boxplots of the continuous features (note charges is right-skewed).
plot_distribution("hist",df,["age","bmi","charges"])
plot_distribution("box",df,["age","bmi","charges"])
# -
def log(df,col):
    """Signed log-transform column `col` of `df` in place.

    Negative values map to -log(|v|); non-negative values to log(v)
    (v == 0 yields -inf, matching numpy). Used to reduce skew in 'charges'.

    Args:
        df: pandas DataFrame, modified in place.
        col: name of the numeric column to transform.
    """
    vals = df[col].to_numpy(dtype=float)
    # Vectorized equivalent of the original per-element loop; errstate silences
    # the harmless warning from evaluating the unselected np.where branch.
    with np.errstate(divide='ignore', invalid='ignore'):
        df.loc[:, col] = np.where(vals < 0, -np.log(np.abs(vals)), np.log(vals))
# Log-transform charges to reduce skew, then re-inspect the distributions.
log(df,'charges')
plot_distribution("hist",df,["age","bmi","charges"])
# +
# Outlier check on bmi: boxplot plus the 95th-percentile cutoff.
df.boxplot(column=['bmi'])
q_value = df['bmi'].quantile(0.95)
print("q_value: ", q_value)
# +
# Pairwise correlations and scatter-matrix of all features.
relation = df.corr()
print("relation: \n", relation)
sns.pairplot(df)
# +
# Count plots of each categorical feature split by the target class.
cols = ['children','sex','region','smoker']
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(20,20))
for i in range(2):
    for j in range(2):
        col = cols[ i * 2 + j]
        sns.countplot(x = df[col], hue = df['insuranceclaim'], ax = axes[i,j])
# -
# ### Model building
#
# - Separate the features and target and then split the train data into train and validation set.
# - Now let's come to the actual task, using logistic regression, predict the insuranceclaim. Select the best model by cross-validation using Grid Search.
# - Try improving upon the `roc_auc_score` using different parameters for Grid Search that give the best score.
#
#
# +
# Code starts here
X=df.drop('insuranceclaim',axis=1)
y=df['insuranceclaim']
# 80/20 split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
# Code ends here.
# +
# Grid-search the regularization strength C for logistic regression.
parameters = {'C':[0.1,0.5,1,5]}
lr = LogisticRegression()
grid = GridSearchCV(estimator = lr, param_grid = parameters)
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_pred, y_test)
accuracy
# +
# NOTE(review): roc_auc_score expects (y_true, y_score) — this call passes
# predictions first; confirm the intended argument order.
score = roc_auc_score(y_pred, y_test)
# ROC curve from predicted probabilities of the positive class.
y_pred_proba = grid.predict_proba(X_test)[:,1]
fpr, tpr, th = metrics.roc_curve(y_test, y_pred_proba)
roc_auc = roc_auc_score(y_test, y_pred_proba)
print("roc_auc: ",roc_auc)
plt.plot(fpr, tpr, label="Logistic model, auc= "+str(roc_auc))
plt.show()
# -
# ### Prediction on the test data and creating the sample submission file.
#
# - Load the test data and store the `Id` column in a separate variable.
# - Perform the same operations on the test data that you have performed on the train data.
# - Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
# +
# Code starts here
df_test=pd.read_csv("test.csv")
df_test.isnull().sum()
#df_test.shape
# Code ends here.
# -
# Keep the Id column aside for the submission file, then mirror the
# training-data preprocessing (drop Id, log-transform charges).
Id=df_test['Id']
df_test.drop('Id',axis=1,inplace=True)
log(df_test,'charges')
final_pred = grid.predict(df_test)
final_pred
submission = pd.DataFrame({'Id':Id, 'insuranceclaim': final_pred})
submission.to_csv('1st_submission.csv', index = False)
| Project---LogisticRegression---Insurance-claim-prediction/logistic_regression_student_template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Find Lane Markings
# ### Define a class to receive the characteristics of each line detection
# +
import numpy as np
# Define a class to receive the characteristics of each line detection
class Line():
    """Tracks one lane line across video frames.

    Keeps the most recent polynomial fits plus moving averages (window 15)
    of curvature radius and lateral offset, so per-frame noise is smoothed.
    """
    def __init__(self,image_shape):
        # NOTE(review): image_shape[0] is the row count (height) of a numpy
        # image, but maxx is used below as the x-extent when computing the
        # camera midpoint — confirm shape[1] (width) was not intended.
        self.maxx = image_shape[0]
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        #self.recent_xfitted = []
        #self.recent_yfitted = []
        self.recent_fits = []
        #average x values of the fitted line over the last n iterations
        #self.bestx = None
        #self.besty = None
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        #radius of curvature of the line in some units
        self.radius_of_curvature = None
        self.radius_of_curvature_array = []
        #distance in meters of vehicle center from the line
        self.line_base_pos = None
        self.line_base_pos_array = []
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None
    #getters and setters
    def set_line(self, pointsx, pointsy):
        """Accept the detected pixel coordinates for this frame, fit a
        2nd-order polynomial x = f(y), and update the smoothed estimates.
        Resets all state when no pixels were provided."""
        assert (type(pointsy) is np.ndarray) and (type(pointsx) == np.ndarray)
        assert pointsy.size > 0
        self.allx = pointsx
        self.ally = pointsy
        if self.allx.size > 0:
            self.detected=True
            # Fit x as a function of y (lane lines are near-vertical).
            self.current_fit = np.polyfit(self.ally, self.allx, 2)
            # Moving average of curvature over the last 15 frames.
            self.radius_of_curvature_array.append(self.__calculate_curvature())
            if len(self.radius_of_curvature_array) >=15:
                self.radius_of_curvature = np.mean(self.radius_of_curvature_array)
                self.radius_of_curvature_array.pop(0)
            # Moving average of the base position over the last 15 frames.
            self.line_base_pos_array.append(self.__calculate_dist())
            if len(self.line_base_pos_array) >= 15:
                self.line_base_pos= np.mean(self.line_base_pos_array)
                self.line_base_pos_array.pop(0)
            if self.best_fit is None :
                self.best_fit = self.current_fit
        else:
            # Lost the line: clear everything so callers fall back to a fresh search.
            self.detected=False
            self.current_fit = [np.array([False])]
            self.radius_of_curvature = None
            self.line_base_pos = None
            self.best_fit = None
    def get_curvature(self):
        # Smoothed curvature radius, or None until enough frames accumulate.
        return self.radius_of_curvature
    def get_distance(self):
        # Smoothed base position offset, or None until enough frames accumulate.
        return self.line_base_pos
    def get_fit(self):
        # Averaged polynomial coefficients (see update_average).
        return self.best_fit
        #return self.current_fit
    #predicate methods
    def is_detected(self):
        return self.detected
    #other public methods
    def is_similar_to(self, other_line, curv_thresh = 500, dist_offset =0.5):
        """Sanity check between left/right lines: similar curvature and offset.
        NOTE(review): the distance test compares |d1| - |d2| (not |.|of the
        difference), and get_distance() may still be None when curvature is
        set — confirm both are intended."""
        if self.get_curvature() is not None and other_line.get_curvature() is not None:
            similar_curvatures = np.abs(self.get_curvature() - other_line.get_curvature()) <= curv_thresh
            similar_distances = np.abs(self.get_distance()) - np.abs(other_line.get_distance()) <= dist_offset
            return similar_curvatures and similar_distances
        else : return False
    #appends x values to x values list, refreshes the moving average of the polynomial coefficients
    def update_average(self):
        """Push current_fit into the 15-frame window and refresh best_fit."""
        #append y values
        self.recent_fits.append(self.current_fit)
        if len(self.recent_fits) > 15:
            remove = self.recent_fits.pop(0)
        if len(self.recent_fits)>1:
            self.best_fit = np.mean(self.recent_fits,axis=0)
        else:
            self.best_fit = self.current_fit
    #private methods
    def __calculate_curvature(self):
        """Curvature radius in meters at the bottom of the detected points,
        using a fit in metric (not pixel) space."""
        ym_per_pix = 30/720 # meters per pixel in y dimension
        xm_per_pix = 3.7/700 # meters per pixel in x dimension
        fit = np.polyfit(self.ally*ym_per_pix, self.allx*xm_per_pix, 2)
        y_eval = np.max(self.ally)
        # Standard radius-of-curvature formula for x = Ay^2 + By + C.
        curvature_radius = ((1 + (2*fit[0]*y_eval*ym_per_pix + fit[1])**2)**1.5) / np.absolute(2*fit[0])
        return curvature_radius
    def __calculate_dist(self):
        """Signed distance (meters) from the camera midpoint to the line at
        the pixel row closest to the vehicle."""
        xm_per_pix = 3.7/700 # meters per pixel in x dimension
        midpoint = xm_per_pix*self.maxx/2 #midpoint assuming camera is centered
        #get the x value for the lowest y in the image (the one with the highest y value)
        sort_indices= self.ally.argsort()
        x=self.allx[sort_indices]
        return xm_per_pix*x[-1] - midpoint
# -
# ### Function that corrects distorsion
# returns: undistorted image
# +
import glob
#correct distortion
def calibrate(calibration_images_path='camera_cal/cal*.jpg'):
    """Compute camera calibration from 9x6 chessboard images.

    Args:
        calibration_images_path: glob pattern for the calibration photos.

    Returns:
        (ret, mtx, dist): cv2.calibrateCamera's RMS error, the camera
        matrix, and the distortion coefficients (inputs to undistort()).
    """
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*9,3), np.float32)
    objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.
    # Make a list of calibration images
    images = glob.glob(calibration_images_path)
    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
    # NOTE(review): img_size comes from the last image read, and the code
    # assumes at least one image matched the glob — confirm inputs exist.
    img_size = (img.shape[1], img.shape[0])
    # Do camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
    return ret, mtx, dist
def undistort(img,mtx,dist):
    """Return `img` corrected for lens distortion, using the camera matrix
    `mtx` and distortion coefficients `dist` produced by calibrate()."""
    return cv2.undistort(img, mtx, dist, None, mtx)
# -
# ### Function that applies a series of thresholds to an image to detect what seems to be lane markings
# returns: a filtered binary image
# +
#apply thresholds
#apply a simple HLS threshold, combining the S (saturation) and L (lightness) channels.
def hls_thresh(image):
    """Binary mask of pixels that are bright (high L) or saturated (high S).

    Converts BGR -> HLS and keeps pixels whose S or L channel value is in
    (180, 255]. Returns a single-channel array of 0/1 values.
    """
    hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
    thresh = {'L': (180,255),
              'S': (180,255)}
    S = hls[:,:,2]  # saturation channel
    L = hls[:,:,1]  # lightness channel
    binary_S = np.zeros_like(S)
    binary_S[(S > thresh['S'][0]) & (S <= thresh['S'][1])] = 1
    binary_L = np.zeros_like(L)
    binary_L[(L > thresh['L'][0]) & (L <= thresh['L'][1])] = 1
    # OR the two masks: either condition marks a likely lane pixel.
    combined_hls = np.zeros_like(S)
    combined_hls[(binary_S == 1) | (binary_L == 1)] = 1
    return combined_hls
def abs_sobel_thresh(img, orient='x', thresh=(0, 255), sobel_kernel=3):
    """Apply an absolute Sobel-gradient threshold along one axis.

    Parameters
    ----------
    img : RGB image.
    orient : 'x' or 'y' -- axis of the derivative.
    thresh : (low, high) inclusive range kept in the output mask
        (default is now an immutable tuple, not a mutable list).
    sobel_kernel : odd Sobel aperture size.

    Returns
    -------
    Binary mask (1 where the scaled |gradient| lies inside thresh).

    Raises
    ------
    ValueError
        If *orient* is neither 'x' nor 'y'.
    """
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative along the requested axis
    if orient == 'x':
        gradient = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        gradient = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        raise ValueError('orient must be either x or y')
    # 3) Absolute gradient magnitude
    abs_grad = np.absolute(gradient)
    # 4) Scale to 8-bit. BUGFIX: the original called np.uint (a 64-bit
    #    type) although its own comment asked for np.uint8.
    scaled = np.uint8(255 * abs_grad / np.max(abs_grad))
    # 5/6) Mask of pixels whose scaled magnitude is inside the range
    grad_binary = np.zeros_like(scaled)
    grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return grad_binary
#returns: a binary image with emphasis on lane lines
# -
# ### Function that takes a perspective binary image and returns a birdseye version
# returns: birdseye view of lane markings (binary)
# +
#apply perspective transform
def warper(img, src, dst):
    """Perspective-warp *img* so the *src* quadrilateral maps onto *dst*.

    Returns the warped image plus the forward (M) and inverse (Minv)
    transform matrices.
    """
    height, width = img.shape[0], img.shape[1]
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Keep the output the same size as the input image
    warped = cv2.warpPerspective(img, M, (width, height), flags=cv2.INTER_NEAREST)
    return warped, M, Minv
#returns: a birds eye version of the binary
# -
# ### Takes a birdseye view and finds lane patterns using sample window search, returns a list of right and left points to be used for polynomial fit
# returns: x and y points for left and right lane markings
# +
#when the image is new or when the markings have been lost from previous frame, apply sample window search to fit a polynomial
#returns an image with lane markings drawn on original image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
def find_points_using_windows(binary_warped, nwindows = 9, margin = 100, minpix = 50):
    """Locate left/right lane pixels in a birdseye binary via sliding windows.

    Parameters
    ----------
    binary_warped : 2-D binary birdseye image.
    nwindows : number of vertical search windows.
    margin : half-width of each window (pixels).
    minpix : minimum pixels in a window before it is re-centred.

    Returns
    -------
    leftx, lefty, rightx, righty : pixel coordinates of both markings.
    """
    # Histogram of the bottom half gives the starting x of each line
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Visualization canvas the search windows get drawn onto
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # BUGFIX: np.int was removed in NumPy 1.24; plain int is equivalent.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    window_height = int(binary_warped.shape[0]//nwindows)
    # Coordinates of every lit pixel in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions, updated window by window
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows from the bottom of the image upwards
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
        (0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
        (0,255,0), 2)
        # Lit pixels falling inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-centre the next window on the mean x of the pixels found
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx,lefty,rightx,righty
# -
# ### Function that uses existing polynomial to find lane markings in a region around polynomial
# returns a new polynomial
# +
# refresh polynomial
# returns polynomial
def refresh_polynomials(left_fit, right_fit, binary_warped):
    """Collect lane pixels lying near previously fitted polynomials.

    Instead of a fresh window search, every lit pixel of *binary_warped*
    within +/-100 px of each second-order fit (evaluated at the pixel's
    own row) is kept.  Returns leftx, lefty, rightx, righty coordinates.
    """
    margin = 100
    ys, xs = np.nonzero(binary_warped)
    ys = np.asarray(ys)
    xs = np.asarray(xs)

    def near(fit):
        # x position of the fitted curve at every lit pixel's row
        curve = fit[0]*ys**2 + fit[1]*ys + fit[2]
        return (xs > curve - margin) & (xs < curve + margin)

    keep_left = near(left_fit)
    keep_right = near(right_fit)
    return xs[keep_left], ys[keep_left], xs[keep_right], ys[keep_right]
# -
# ## Pipeline applying transformations to image frames
# +
# Read in a thresholded image for now...
# Demo cell: load a pre-thresholded birdseye image and show it.
warped = mpimg.imread('warped_example.jpg')
plt.figure()
plt.imshow(warped,cmap='gray')
# Line is a lane-marking tracker class defined elsewhere in this file;
# presumably it takes the frame shape -- TODO confirm against Line.__init__.
right_L = Line(warped.shape)
left_L = Line(warped.shape)
def get_lane_lines(warped,left_line,right_line,nwindows = 9, margin = 100, minpix = 50):
    """Update the two Line trackers with pixels found in *warped*.

    Uses the cheap around-the-previous-fit search when both lines were
    detected in an earlier frame, otherwise (or when that search comes
    back empty) falls back to the full sliding-window search.

    Returns the (updated in place) left and right Line objects.
    """
    # If no existing/valid polynomial: use the sliding-window search.
    if not right_line.is_detected() or not left_line.is_detected():
        # BUGFIX: minpix is now forwarded instead of a hard-coded 50,
        # so the parameter actually has an effect.
        leftx,lefty,rightx,righty = find_points_using_windows(
            warped, nwindows, margin, minpix=minpix)
    else :
        left_fit = left_line.get_fit()
        right_fit = right_line.get_fit()
        leftx,lefty,rightx,righty = refresh_polynomials(left_fit,right_fit,warped)
        if leftx.size == 0 or rightx.size == 0:
            # The targeted search lost the line; start over with windows.
            leftx,lefty,rightx,righty = find_points_using_windows(
                warped, nwindows, margin, minpix=minpix)
    left_line.set_line(leftx,lefty)
    right_line.set_line(rightx,righty)
    # (The original recomputed left_fit/right_fit here and discarded
    # them; that dead code has been removed.)
    return left_line, right_line
left_line,right_line=get_lane_lines(warped,left_L,right_L)
#get data for plot
# ally holds the y coordinates used to evaluate the fits -- TODO confirm
# against the Line class definition.
ploty = left_line.ally
left_fit = left_line.get_fit()
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fit = right_line.get_fit()
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Plot up the data
plt.plot(left_fitx, ploty, color='green', linewidth=3)
plt.plot(right_fitx, ploty, color='green', linewidth=3)
plt.xlim(0, 1280)
plt.ylim(0, 720)
plt.gca().invert_yaxis() # to visualize as we do the images
#print('left line dist: ',left_line.get_distance(),'\nright line dist: ',right_line.get_distance())
#print('is similar: ',right_line.is_similar_to(left_line))
#calculate radius of curvature
#return radius of curvature
#unapply perspective transform and visual representation of result and append curvature results to image
#returns an image with lane markings drawn on original image
#display frames
# -
#find calibration matrix
# One-time camera calibration from the chessboard shots in camera_cal/;
# mtx/dist are used by image_pipeline() below.
ret, mtx, dist = calibrate(calibration_images_path='camera_cal/cal*.jpg')
# +
### test all functions
#get a random frame from the video to test :
cap = cv2.VideoCapture('project_video.mp4')
width= cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
length = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Refuse to continue on an empty or unreadable video file
try:
    assert length > 0
except AssertionError as e :
    raise ValueError('the length of the video file appears to be zero')
#pick and display a random frame
#fr = np.random.randint(length)
fr =0
cap.set(cv2.CAP_PROP_POS_FRAMES,fr)
ret, frame= cap.read()
#if ret:
#    plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
#    plt.title('original image')
#    plt.axis('off')
cap.release()
def image_pipeline(frame,left_marker,right_marker, debug=False):
    """Full lane-detection pipeline for a single BGR video frame.

    Undistorts, thresholds, warps to a birdseye view, updates the two
    Line trackers, then projects the detected lane back onto the frame.
    When *debug* is True each intermediate stage is plotted.
    Returns the annotated BGR frame.
    """
    #apply pipeline to random frame, print every step
    #1. undistort
    # mtx/dist come from the module-level calibrate() call
    frame= undistort(frame,mtx,dist)
    #display original image
    if debug:
        plt.figure()
        plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        plt.axis('off')
        plt.imsave('undistorted.png',cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        plt.title('undistorted image')
    #threshold
    binary_hls= hls_thresh(frame) #apply filter based on HLS color space
    binary_dir_thresh = abs_sobel_thresh(frame, thresh=[20,100], sobel_kernel=15) #apply directional threshold
    binary_image = np.zeros_like(binary_hls) #merge two previous filters
    binary_image[(binary_hls == 1) | (binary_dir_thresh == 1)] = 1
    # scale 0/1 mask to 0/255 so the warped image displays as grayscale
    binary_image= binary_image * 255
    #display thresholded image
    if debug:
        plt.figure()
        plt.imshow(binary_image,cmap='gray')
        plt.axis('off')
        plt.title('thresholded binary image')
    #perspective transform
    # img_size is (width, height); src is a hand-tuned road trapezoid,
    # dst the rectangle it maps onto in the birdseye view
    img_size = binary_image.shape[::-1]
    src= np.float32([[600,450],[700,450],[200,img_size[1]],[1200,img_size[1]]])
    dx = img_size[0] / 5
    dy=5
    dst = np.float32([[0+dx,0+dy],
                  [img_size[0]-dx,0+dy],
                  [0+dx,img_size[1]-dy],
                  [img_size[0]-dx,img_size[1]-dy]])
    warped_binary,M,Minv= warper(binary_image, src, dst)
    #display warped image with source and destination lines overlaid
    if debug:
        black=np.dstack((binary_image,binary_image,binary_image))
        pts = src.reshape((-1,1,2))
        cv2.polylines(black,[pts.astype(np.int32)[np.array([0,1,3,2])]],True,(0,255,255),5)
        pts = dst.reshape((-1,1,2))
        cv2.polylines(black,[pts.astype(np.int32)[np.array([0,1,3,2])]],True,(255,0,0),5)
        plt.figure()
        plt.imshow(black)
        plt.title('source and destination')
        plt.figure()
        plt.imshow(warped_binary,cmap='gray')
        plt.title('warped result')
    #curvature calculation
    ## This is where the lane marking object get updated
    left_line,right_line=get_lane_lines(warped_binary,left_marker,right_marker)
    #run sanity checks
    # only fold the new fit into the rolling average when both lines agree
    if left_marker.is_similar_to(right_marker):
        #print("marker was updated")
        left_marker.update_average()
        right_marker.update_average()
    #plot lines on warped binary
    ploty = left_line.ally
    left_fit = left_line.get_fit()
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fit = right_line.get_fit()
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    if debug:
        plt.plot(left_fitx, ploty, color='green', linewidth=3)
        plt.plot(right_fitx, ploty, color='green', linewidth=3)
        plt.xlim(0, 1280)
        plt.ylim(0, 720)
        plt.gca().invert_yaxis() # to visualize as we do the images
    #unwrap and draw green zone on original frame
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped_binary).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (frame.shape[1], frame.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(frame, 1, newwarp, 0.3, 0)
    if debug:
        plt.figure()
        plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
        plt.axis('off')
        plt.title('unwarped result')
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Overlay curvature / lane-distance metrics when the trackers have them
    if right_marker.get_curvature() is not None:
        cv2.putText(result,'Curvature: '+format(right_marker.get_curvature(),'2.0f')+'m',(10,100), font, 2,(255,255,255),2,cv2.LINE_AA)
    if left_marker.get_distance() is not None and right_marker.get_distance() is not None:
        cv2.putText(result,'Distance L: '+format(left_marker.get_distance(),'2.2f')+'m R:'+format(right_marker.get_distance(),'2.2f')+'m',(10,200), font, 2,(255,255,255),2,cv2.LINE_AA)
    return result
# Single-frame smoke test of the full pipeline with fresh trackers.
# frame.shape[0:2][::-1] is (width, height) -- presumably what Line
# expects; TODO confirm against Line.__init__.
rl = Line(frame.shape[0:2][::-1])
ll = Line(frame.shape[0:2][::-1])
img = image_pipeline(frame,ll,rl,debug=False)
plt.figure()
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
# -
# ### Process video
# Inspect tracker state after the one frame (fit history and averages)
print(rl.recent_fits)
print(len(ll.recent_fits))
print(rl.best_fit)
print(rl.current_fit)
# +
from moviepy.editor import VideoFileClip
#todo : it is not convenient not to be able to pass arguments to the function that processes the frame... find another way to edit video ???
# Fresh trackers shared across all frames of the clip
rl = Line(frame.shape[0:2][::-1])
ll = Line(frame.shape[0:2][::-1])
def process_image(frame):
    # Closure over ll/rl so fl_image's one-argument callback can use them
    return image_pipeline(frame,ll,rl,debug=False)
video_output = 'video_output.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
#clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(6,8)
clip1 = VideoFileClip("project_video.mp4")
#clip1 = VideoFileClip("challenge_video.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(video_output, audio=False)
# -
# ### Display video inline
from IPython.display import HTML
#play result
# BUGFIX: the original referenced an undefined name `white_output`
# (NameError); the clip above is written to `video_output`.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(video_output))
| .ipynb_checkpoints/Lane_Marking_Tracker-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intraday Trading via Day Trading Techniques & Indicators
# ---
#
# ### Data collected via AlphaVantage free API using extended intraday data.
# > https://www.alphavantage.co/documentation/
#
# ---
# # 02 - Data Cleaning
# ### Library Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
# # Read in Feature Engineered Dataset
# Load the engineered 1-minute SPY bars and index them by timestamp
df = pd.read_csv('../01_Data/extended_intraday_SPY_1min_featured.csv')
df.set_index(pd.DatetimeIndex(df['time']), inplace=True)
df.drop(columns = ['time'], inplace = True)
df.head(15)
# # Create a Mask for Desired Time Frame
#
# **We want to focus on market hours, incorporating only 30 minutes before open and 30 minutes after close when volume is still relatively high.**
test = df.query("time >= '2019-10-28' and time < '2019-10-29'")
test.head()
test.tail()
# **We see our timeframe for each day starts at 4am and ends at 8pm Eastern Time**
#
# **Market hours are from 9:30am to 4pm, but remember that we also want some time before open and after close, so we are looking to make a mask for our dataframe to only show the hours from 9:00am to 4:30pm.**
df.shape
# Restrict to 09:00-16:30 ET: market hours plus 30 min either side
df_filtered = df.between_time('09:00', '16:30')
df_filtered.shape
df_filtered.head()
# **Now we have only the data in our specified time period. To double check, lets observe a specific day.**
test = df_filtered.query("time >= '2019-10-28' and time < '2019-10-29'")
test.head()
test.tail()
# **Our filter looks good.**
#
# **As a final check, lets make sure the plot of our data does not look significantly different from the unfiltered version.**
df_filtered['close'].plot(figsize=(18,9))
df['close'].plot(figsize=(18,9))
plt.legend(['Filtered', 'Unfiltered']);
# **We have, essentially, a perfect overlap. Removing the data we did has not caused significant variance.**
# # Save Filtered Dataset
df_filtered.to_csv('../01_Data/extended_intraday_SPY_1min_filtered.csv')
| 00_Code/02_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# バイナリファイルダンププログラムでjpegファイルをダンプ
# +
# Hex-dump the JPEG file, 16 bytes per row.
f_name="Parrots.jpg"
# BUGFIX: the original used `f.close` (attribute access, never called),
# leaving the file open; a with-block closes the handle deterministically.
with open(f_name,"rb") as f:
    s=f.read()
# Column header 00..0f; 7 spaces align it past the "xxx# : " row prefix.
print(" " * 7,end="")
for cnt in range(16):
    print("{:02x} ".format(cnt),end="")
print("")
cnt=0
rows=0
for byte in s:
    if cnt==0:
        # Row prefix: offset of the row (in units of 16 bytes)
        print("{:03x}# : ".format(rows),end="")
    print("{:02x} ".format(byte),end="")
    cnt+=1
    if cnt==16:
        cnt=0
        print("")
        rows+=1
# -
# jpegファイルのセグメント構造の抽出
# +
# Parse the JPEG byte stream into a list of segments (jpeg_struct).
# Each entry is ["SOI"], ["EOI"], [name, length, data] for a segment,
# or ["IMG", length, data] for the entropy-coded image data.
marker_def={0xd8:"SOI",0xd9:"EOI",0xda:"SOS",0xe0:"APP0",0xdb:"DQT",0xc0:"SOF0",0xc4:"DHT"}
flag_marker= False
flag_seg=False
flag_seg_cnt=False
flag_seg_data=False
flag_SOI= False
flag_EOI= False
flag_SOS= False
flag_err=False
jpeg_struct=[]
seg_buf=[]
byte_bufs=b''
seg_count=0
# BUGFIX: the original called `f.close` without parentheses (a no-op);
# the with-block guarantees the file is closed.
with open(f_name,"rb") as f:
    s=f.read()
for byte in s:
    if flag_marker==False and byte==0xFF : # 0xFF announces a marker
        flag_marker=True
    else:
        ####### marker handling #########
        if flag_marker==True :
            # FF00 is a stuffed data byte inside the scan, not a marker
            if byte==0x00 :
                byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(0))
            # marker we know by name
            elif byte in marker_def:
                # SOI must be the first marker of the stream
                if flag_SOI==False :
                    if marker_def[byte]=="SOI" :
                        flag_SOI=True
                        jpeg_struct=jpeg_struct+[["SOI"]]
                    else:
                        flag_err=True;
                # EOI: store the collected image data and finish
                elif marker_def[byte]=="EOI":
                    jpeg_struct=jpeg_struct+[["IMG","{:d}".format(len(byte_bufs)),byte_bufs]]
                    jpeg_struct=jpeg_struct+[["EOI"]]
                    flag_EOI=True
                # any other known marker opens a length-prefixed segment
                elif byte in marker_def:
                    seg_buf=[""+marker_def[byte]]
                    flag_seg=True
                    # after the SOS segment the entropy-coded data follows
                    if marker_def[byte]=="SOS":
                        flag_SOS=True
            # unknown marker: record it as FFxx and read its segment
            else:
                seg_buf=["FF{:X}".format(byte)]
                flag_seg=True
            flag_marker=False
        else:
            # segment payload: 2 length bytes, then (length-2) data bytes
            if flag_seg==True:
                if(flag_seg_cnt==False):
                    seg_count=seg_count+1
                    seg_size_h=byte  # high byte of the segment length
                    flag_seg_cnt=True
                elif(flag_seg_data==False):
                    seg_size=seg_size_h*256+byte
                    seg_buf=seg_buf+["{:d}".format(seg_size)]
                    seg_size=seg_size-2  # declared length includes itself
                    byte_bufs=b''
                    flag_seg_data=True
                else:
                    byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(byte))
                    seg_size=seg_size-1
                    if seg_size==0:
                        seg_buf=seg_buf+[byte_bufs]
                        jpeg_struct=jpeg_struct+[seg_buf]
                        byte_bufs=b''
                        flag_seg=False
                        flag_seg_cnt=False
                        flag_seg_data=False
            # image data (after the SOS segment)
            elif flag_SOS==True and flag_seg==False:
                byte_bufs=byte_bufs+bytes.fromhex("{:02X}".format(byte))
            # anything else means the stream is malformed
            else:
                flag_err=True
    if flag_err==True or flag_EOI==True:
        break
if flag_err==False and flag_EOI==True:
    print("Succeeded!!")
# -
# 抽出したjpegファイルの構造(リスト型 jpeg_struct)の出力
jpeg_struct
len(jpeg_struct)
for seg in jpeg_struct:
print(seg[0])
# +
# Pretty-print every segment collected in jpeg_struct by the parser above.
flag_SOI= False
flag_EOI= False
flag_SOS= False
flag_err=False
# BUGFIX: the original line `integer vlen[16]` is not Python (SyntaxError);
# initialise the DHT code-length table as a plain list instead.
vlen=[0]*16
for seg in jpeg_struct:
    print(seg[0])
    if(seg[0] == "IMG"):
        print("    DATA LENGTH : ",seg[1],sep="")
    else:
        if(seg[0] == "SOI"):
            flag_SOI=True
        elif(seg[0] == "EOI"):
            flag_EOI=True
        else:
            print("    SEG LENGTH : ",seg[1])
            data=seg[2]
            if(seg[0] == "APP0"):
                # JFIF header: id, version, density unit, densities, thumbnail
                print("    ID  : ",data[0:4].decode(),sep="")
                print("    Ver : ",data[5],".",data[6],sep="")
                print("    U   : ",data[7],sep="")
                print("    Xd  : ",data[8]*256+data[9],sep="")
                print("    Yd  : ",data[10]*256+data[11],sep="")
                print("    Xd  : ",data[12],sep="")
                print("    Yd  : ",data[13],sep="")
                for i in range(data[12]*data[13]):
                    print("    RGB",i,":",data[14+i],sep="")
            elif(seg[0] == "DQT"):
                # One or more quantisation tables: 1 id byte (Pq/Tq)
                # followed by 64 (8-bit) or 128 (16-bit) values each.
                length = int(seg[1])-3
                base = 0
                while(length >0):
                    # BUGFIX: read the id byte of the CURRENT table
                    # (original always read data[0]).
                    pqn=data[base]>>4
                    tqn=data[base]&0x0F
                    if(pqn==0):
                        qlen=64
                    else:
                        qlen=128
                    print("    Pq",tqn," : ",pqn,sep="")
                    print("    Tq",tqn," : ",tqn,sep="")
                    for i in range(qlen):
                        print("    Q",tqn,"-",ascii(i)," : ",data[base+1+i],sep="")
                    # BUGFIX: also step over the 1-byte Pq/Tq id so a
                    # multi-table DQT segment is walked correctly.
                    length-=qlen+1
                    base+=qlen+1
            elif(seg[0] == "SOF0"):
                # Baseline frame header: precision, height, width, components
                nf=data[5]
                # BUGFIX: sample precision P is data[0]; the original
                # printed data[1] (the high byte of Y).
                print("    P  : ",data[0])
                print("    Y  : ",data[1]*256+data[2],sep="")
                print("    X  : ",data[3]*256+data[4],sep="")
                print("    Nf : ",data[5])
                for i in range(nf):
                    print("    C",i+1,"  : ",data[6+i*3],sep="")
                    print("    H",i+1,"  : ",data[7+i*3]>>4,sep="")
                    print("    V",i+1,"  : ",data[7+i*3]&0x0F,sep="")
                    print("    Tq",i+1," : ",data[8+i*3],sep="")
            elif(seg[0] == "DHT"):
                # Huffman table: class/id byte, 16 code-length counts, values
                thn=data[0]&0x0f
                tcn=data[0]>>4
                print("    Tc",thn," : ",tcn)
                print("    Th",thn," : ",thn)
                vlen=[]
                for i in range(16):
                    vlen+= [data[1+i]]
                    print("    L",i+1," ; ",data[1+i],sep="")
                base = 17
                for i in range(16):
                    # BUGFIX: `ragne` -> `range` (NameError in the original).
                    for j in range(vlen[i]):
                        if(tcn==0):
                            print("    V",i+1,"-",j+1," : ",data[base+j],sep="")
                        else:
                            print("    V",i+1,"-",j+1," : ",data[base+j]>>4,",",data[base+j]&0x0F,sep="")
                    base+=vlen[i]
            elif(seg[0] == "SOS"):
                # Scan header: component count and entropy-table selectors
                ns=data[0]
                print("    Ns : ",ns)
                for i in range(ns):
                    print("    Cs",i+1," : ",data[1+i*2],sep="")
                    print("    Td",i+1," : ",data[2+i*2]>>4,sep="")
                    print("    Ta",i+1," : ",data[2+i*2]&0x0F,sep="")
jpeg_struct[2]
ascii(10)
# matplotによるjpegファイル読み込み
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.colors as mpcol
# Load the same JPEG via matplotlib for comparison with the manual parse
img = mpimg.imread(f_name)
imgplot = plt.imshow(img)
imgplot.axes.set_xticks([]) # remove x-axis tick marks
imgplot.axes.set_yticks([]) # remove y-axis tick marks
# -
# 読み込んだimgの確認
# numpyのndarray型の模様
# 150×150 pixel のRGB3色データと思われる。
type(img)
img.shape
img.dtype
img.size,150*150*3
| .ipynb_checkpoints/jpeg_charange-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
from datetime import timedelta
# Python 2 notebook cell: load the labelled tweet dataset.
df = pd.read_csv("../../data/all_with_dates-formatted.csv", encoding='latin-1').drop(["Unnamed: 0","Unnamed: 0.1"], axis=1)
# Unique US state names present in the dataframe
states = pd.DataFrame({'counts' : df[df.country == "United States"].groupby( [ "state"] ).size()}).reset_index().state
counts=0
_states = []
# Keep states with >100 tweets on at least 22 distinct days
for state in states:
    gp = pd.DataFrame({'counts' : df[df.state == state].groupby( [ "just_date"] ).size()}).reset_index().sort_values(["counts"])
    if gp[gp.counts > 100].shape[0] >= 22:
        _states.append(state)
        counts=counts+1
print counts
df["just_date"] = pd.to_datetime(df['created_at']).dt.date
worry=98
s = ""
for date in range(0,23):
s = str(pd.to_datetime("2018-05-14") + timedelta(days=date)) + ","
for state in _states:
_all = df[df.state == state][pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
_worried = _all[df.worry==worry]
s = s + str(float((_worried.shape[0]*100))/_all.shape[0]) + ","
print s
min(df.just_date)
_states
# ## Indian states
indian_states = pd.DataFrame({'counts' : df[df.country == "India"].groupby( [ "state"] ).size()}).reset_index().state
counts=0
indian_states_fil = []
for state in indian_states:
gp = pd.DataFrame({'counts' : df[df.state == state].groupby( [ "just_date"] ).size()}).reset_index().sort_values(["counts"])
if gp[gp.counts > 100].shape[0] >= 22:
indian_states_fil.append(state)
print state
counts=counts+1
print counts
a = pd.DataFrame({'counts' : df[df.country == "United States"].groupby( [ "state"] ).size()}).reset_index().sort_values(["counts"])
indian_states_fil = a[a.counts > 1000].state.values
indian_states_fil.sort()
print indian_states_fil
worry=98
for state in indian_states_fil:
print round(float(df[df.state == state][df.worry == worry].shape[0]*100)/df[df.state == state].shape[0],1)
print indian_states_fil
# Daily percentage of worry==98 (politics) tweets per filtered state,
# printed as one CSV-ish row per day.
worry=98
s = ""
for date in range(0,23):
    s = str(pd.to_datetime("2018-05-14") + timedelta(days=date)) + ","
    for state in indian_states_fil:
        _all = df[df.state == state][pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
        # NOTE(review): `_all[df.worry==worry]` masks a subset with a mask
        # built from the full frame -- relies on index alignment; verify.
        _worried = _all[df.worry==worry]
        s = s + str(float((_worried.shape[0]*100))/_all.shape[0]) + ","
    print s
counts=0
for country in df.country.unique():
gp = pd.DataFrame({'counts' : df[df.country == country].groupby( [ "just_date"] ).size()}).reset_index().sort_values(["counts"])
if gp[gp.counts > 250].shape[0] >= 15:
print country, gp[gp.counts > 250].shape[0]
counts=counts+1
print counts
# ### What worries world
classes = {91: 'health', 92: 'safety_security', 93 : 'environment',
94 : 'social_relations', 95 : 'meaning_in_life', 96 : 'achievement',
97 : 'economics', 98 : 'politics', 99 : 'not_applicable', 0 : 'skip' }
s = ""
for worry in _df.worry.unique():
s = s + "," + classes[worry]
print s
s = ""
for date in range(0,23):
s = str(pd.to_datetime("2018-05-14") + timedelta(days=date)) + ","
for worry in df.worry.unique():
_all = _df[pd.to_datetime(_df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
_worried = _all[_df.worry==worry]
s = s + str(float((_worried.shape[0]*100))/_all.shape[0]) + ","
print s
a = df[pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-26 00:00:00")][df.worry==92]
pd.DataFrame({'counts' : a.groupby( [ "url"] ).size()}).reset_index().sort_values(["counts"],ascending=False).values[:10]
df[df.url == "https://www.yahoo.com/news/trumps-son-concerned-wiretaps-show-trump-jr-met-putin-ally-231215529.html"].worry.unique()
_df = df
url = "https://www.yahoo.com/news/trumps-son-concerned-wiretaps-show-trump-jr-met-putin-ally-231215529.html"
_df.loc[_df.url == url,"worry"] = [94]
float(_df[pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-26 00:00:00")][_df.url == url].shape[0])/_df[pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-26 00:00:00")].shape[0]
#_df[_df.url == url].shape[0]/_df[_df.url == url].shape[0]
worry=98
for state in ["Australia", "Belgium", "Canada", "Colombia", "France", "Germany", "India", "Indonesia", "Ireland", "Italy", "Japan", "Malaysia", "Netherlands", "Norway", "Pakistan", "Philippines", "Singapore", "South Africa", "Spain", "Sweden", "Thailand", "United Arab Emirates", "United Kingdom", "United States"]:
print round(float(df[df.country == state][df.worry == worry].shape[0]*100)/df[df.country == state].shape[0],1)
a = df[pd.to_datetime(df.just_date) == pd.to_datetime("2018-06-03")][df.worry == 98]
a = pd.DataFrame({'counts' : a.groupby( ["url"] ).size()}).reset_index().sort_values(["counts"],ascending = False).values[:10]
a
for row in a:
if "trump" in row[0]:
print row
b = df[pd.to_datetime(df.just_date) == pd.to_datetime("2018-06-03")][df.worry == 98]
b = pd.DataFrame({'counts' : b.groupby( ["country"] ).size()}).reset_index().sort_values(["counts"],ascending = False).values[:10]
b
_c = ["United States",
"India",
"Canada",
"United Kingdom",
"Australia",
"Malaysia",
"Pakistan"]
worry=98
s = ""
for date in range(0,1):
s = str(pd.to_datetime("2018-06-03") + timedelta(days=date)) + ","
score = 0.0
for country in _c:
_all = df[df.country == country][pd.to_datetime(df.just_date) == pd.to_datetime("2018-06-03") + timedelta(days=date)]
_worried = _all[df.worry==worry]
score = score + float((_worried.shape[0]*100))/_all.shape[0]
print country , str(float((_worried.shape[0]*100))/_all.shape[0])
print score/len(_c)
df[pd.to_datetime()]
df[df.country == country][pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
worry=98
s = ""
contries = [
"Canada","India","United Kingdom","United States","Australia","Malaysia","Pakistan"
]
for date in range(0,23):
s = str(pd.to_datetime("2018-05-14") + timedelta(days=date)) + ","
for country in contries:
_all = df[df.country == country][pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
_worried = _all[df.worry==worry]
s = s + str(float((_worried.shape[0]*100))/_all.shape[0]) + ","
print s
df[df.country == "Pakistan"][pd.to_datetime(df.just_date) == pd.to_datetime("2018-05-14") + timedelta(days=date)]
| micromort/tweets_processing/US_states processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sbooeshaghi/azucar/blob/main/analysis/293T/obs6/assign.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="iKTJTwTt3OF0" outputId="c3273c28-66a9-4189-99c1-4f6b5a91d76f"
# !pip install --quiet -U upsetplot scikit-learn ipfn
# + colab={"base_uri": "https://localhost:8080/"} id="_0UcVgt9IiBw" outputId="d21f7118-f3dd-4e6a-84f4-b0f4641aaa51"
# !git clone https://github.com/sbooeshaghi/azucar.git
# + cellView="form" id="4YqMAvSRh78z"
#@title index.py
# %%bash
# echo -n "#!/usr/bin/env python3
import sys
import os
from collections import defaultdict
def write_dict(fname, d):
    """Write the keys of *d* to *fname*, one per line, ordered by value.

    *d* maps name -> index; line i of the output is the name whose
    index is i (indices are assumed to be 0..len(d)-1).
    """
    by_index = {index: name for name, index in d.items()}
    with open(fname, 'w') as out:
        for i in range(len(by_index)):
            out.write(f'{by_index[i]}\n')
def write_markers(fname, markers):
    """Write *markers* as lines of '<key>\\t<v1,v2,...>'.

    Each entry maps a key to an iterable of values; the values are
    comma-joined on the key's line.
    """
    with open(fname, 'w') as out:
        for key, values in markers.items():
            joined = ','.join(f'{v}' for v in values)
            out.write(f'{key}\t{joined}\n')
def read_markers(fname,
                 markers_ec=None,
                 celltype=None,
                 marker_genes=None):
    """Parse a TSV of 'celltype<TAB>gene1,gene2,...' lines.

    Fills three mappings (created fresh when not supplied -- BUGFIX:
    the original used mutable default arguments, so state leaked
    between calls):
      markers_ec   : celltype index -> sorted list of gene indices
      celltype     : celltype name  -> index (file order)
      marker_genes : gene name      -> index (first-seen order)
    Returns (markers_ec, celltype, marker_genes).
    """
    markers_ec = defaultdict(list) if markers_ec is None else markers_ec
    celltype = defaultdict() if celltype is None else celltype
    marker_genes = defaultdict() if marker_genes is None else marker_genes
    with open(fname, 'r') as f:
        for idx, line in enumerate(f.readlines()):
            ct, genes = line.strip().split('\t')
            celltype[ct] = idx
            for g in genes.split(','):
                # Reuse the index of a gene already seen, otherwise
                # assign the next free one.
                if g not in marker_genes:
                    marker_genes[g] = len(marker_genes)
                markers_ec[celltype[ct]].append(marker_genes[g])
            # keep each cell type's marker-gene indices sorted
            markers_ec[celltype[ct]] = sorted(markers_ec[celltype[ct]])
    return markers_ec, celltype, marker_genes
def main(markers_fname, outdir):
    """Parse *markers_fname* and write the three index files to *outdir*."""
    markers_ec = defaultdict(list)
    celltypes = defaultdict()
    marker_genes = defaultdict()
    read_markers(markers_fname, markers_ec, celltypes, marker_genes)
    # (filename, writer, payload) triples written in the original order
    outputs = [('markers.ec', write_markers, markers_ec),
               ('groups.txt', write_dict, celltypes),
               ('marker_genes.txt', write_dict, marker_genes)]
    for name, writer, payload in outputs:
        writer(os.path.join(outdir, name), payload)
if __name__ == '__main__':
markers_fname = sys.argv[1]
outdir = sys.argv[2]
main(markers_fname, outdir)" > index.py
# + cellView="form" id="LR_52CgziWnT"
#@title select.py
# %%bash
# echo -n "#!/usr/bin/env python3
import sys
import os
from collections import defaultdict
def read_markers(fname,
                 markers_ec=None,
                 celltype=None,
                 marker_genes=None):
    """Parse a TSV of 'celltype<TAB>gene1,gene2,...' lines.

    Fills three mappings (created fresh when not supplied -- BUGFIX:
    the original used mutable default arguments, so state leaked
    between calls):
      markers_ec   : celltype index -> sorted list of gene indices
      celltype     : celltype name  -> index (file order)
      marker_genes : gene name      -> index (first-seen order)
    Returns (markers_ec, celltype, marker_genes).
    """
    markers_ec = defaultdict(list) if markers_ec is None else markers_ec
    celltype = defaultdict() if celltype is None else celltype
    marker_genes = defaultdict() if marker_genes is None else marker_genes
    with open(fname, 'r') as f:
        for idx, line in enumerate(f.readlines()):
            ct, genes = line.strip().split('\t')
            celltype[ct] = idx
            for g in genes.split(','):
                # Reuse the index of a gene already seen, otherwise
                # assign the next free one.
                if g not in marker_genes:
                    marker_genes[g] = len(marker_genes)
                markers_ec[celltype[ct]].append(marker_genes[g])
            # keep each cell type's marker-gene indices sorted
            markers_ec[celltype[ct]] = sorted(markers_ec[celltype[ct]])
    return markers_ec, celltype, marker_genes
def read_genes(genes_fname, genes=None):
    """Map each gene name in *genes_fname* (one per line) to its line number.

    BUGFIX: the original used a mutable default argument
    (`genes=defaultdict()`), so entries persisted across calls; a fresh
    dict is now created when *genes* is not supplied.
    Returns the mapping.
    """
    genes = defaultdict() if genes is None else genes
    with open(genes_fname) as f:
        for idx, line in enumerate(f.readlines()):
            genes[line.strip()] = idx
    return genes
def sel_genes(genes, marker_genes, sel=None):
    """Append to *sel* the row index (from *genes*) of each marker gene,
    in marker-gene index order.

    BUGFIX: the original used a mutable default list (`sel=[]`), which
    accumulated across calls; a fresh list is now created when *sel*
    is not supplied.  Returns *sel*.
    """
    sel = [] if sel is None else sel
    index_to_name = {v: k for k, v in marker_genes.items()}
    for i in range(len(index_to_name)):
        # map marker-gene index i -> its name -> its row in the gene file
        sel.append(genes[index_to_name[i]])
    return sel
def write_list(fname, lst):
    """Write each element of *lst* to *fname*, one per line."""
    with open(fname, 'w') as out:
        out.writelines(f'{el}\n' for el in lst)
def main(markers_fname, genes_fname, outdir):
    """Write ``select.txt``: full-gene-list indices of the marker genes.

    Re-parses the markers file, reads the full gene list, and maps each
    marker gene (in first-appearance order) to its row in the gene list.
    """
    ec = defaultdict(list)
    groups = defaultdict()
    mgenes = defaultdict()
    # NOTE: this duplicates the parsing done by index.py; ideally we would
    # consume markers.ec directly instead.
    read_markers(markers_fname, ec, groups, mgenes)
    gene_index = defaultdict()
    read_genes(genes_fname, gene_index)
    selected = []
    sel_genes(gene_index, mgenes, selected)
    write_list(os.path.join(outdir, 'select.txt'), selected)
# Script entry point: usage ``select.py <markers_file> <genes_file> <output_dir>``.
if __name__ == '__main__':
    markers_fname = sys.argv[1]  # tab-separated "celltype<TAB>gene,gene,..." file
    genes_fname = sys.argv[2]  # one gene name per line (full gene list)
    outdir = sys.argv[3]  # directory receiving select.txt
    main(markers_fname, genes_fname, outdir)" > select.py
# + id="QOiBVrZJihWG"
# !chmod +x index.py
# !chmod +x select.py
# + cellView="form" id="-j38efKcJLki"
#@title sklearn/mixture/_base.py
"""Base class for mixture models."""
# sklearn/mixture/_base.py
# Author: <NAME> <<EMAIL>>
# Modified by <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from scipy.special import logsumexp
from sklearn import cluster
from sklearn.base import BaseEstimator
from sklearn.base import DensityMixin
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_is_fitted
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : str
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError(
"The parameter '%s' should have the shape of %s, but got %s" %
(name, param_shape, param.shape))
class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for mixture models.

    This abstract class specifies an interface for all mixture classes and
    provides basic common methods for mixture models.

    NOTE(review): this is a vendored copy of scikit-learn's
    ``sklearn.mixture._base.BaseMixture`` with project-specific changes:
    ``_initialize_parameters``, ``fit_predict``, ``_initialize`` and
    ``_m_step`` accept extra ``B`` (marker structure, forwarded unchanged to
    subclass hooks) and ``resp`` (user-supplied initial responsibilities)
    arguments.
    """

    def __init__(
        self,
        n_components,
        tol,
        reg_covar,
        max_iter,
        n_init,
        init_params,
        random_state,
        warm_start,
        verbose,
        verbose_interval,
    ):
        # Hyper-parameters are stored verbatim; validation happens lazily in
        # _check_initial_parameters() at fit time (sklearn convention).
        self.n_components = n_components
        self.tol = tol
        self.reg_covar = reg_covar
        self.max_iter = max_iter
        self.n_init = n_init
        self.init_params = init_params
        self.random_state = random_state
        self.warm_start = warm_start
        self.verbose = verbose
        self.verbose_interval = verbose_interval

    def _check_initial_parameters(self, X):
        """Check values of the basic parameters.

        Raises ``ValueError`` for any out-of-range hyper-parameter, then
        delegates to the subclass ``_check_parameters`` hook.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        """
        if self.n_components < 1:
            raise ValueError("Invalid value for 'n_components': %d "
                             "Estimation requires at least one component" %
                             self.n_components)
        if self.tol < 0.0:
            raise ValueError("Invalid value for 'tol': %.5f "
                             "Tolerance used by the EM must be non-negative" %
                             self.tol)
        if self.n_init < 1:
            raise ValueError(
                "Invalid value for 'n_init': %d Estimation requires at least one run"
                % self.n_init)
        if self.max_iter < 1:
            raise ValueError("Invalid value for 'max_iter': %d "
                             "Estimation requires at least one iteration" %
                             self.max_iter)
        if self.reg_covar < 0.0:
            raise ValueError("Invalid value for 'reg_covar': %.5f "
                             "regularization on covariance must be "
                             "non-negative" % self.reg_covar)
        # Check all the parameters values of the derived class
        self._check_parameters(X)

    @abstractmethod
    def _check_parameters(self, X):
        """Check initial parameters of the derived class.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        """
        pass

    def _initialize_parameters(self, X, random_state, B=None, resp=None):
        """Initialize the model parameters.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        random_state : RandomState
            A random number generator instance that controls the random seed
            used for the method chosen to initialize the parameters.

        B : optional
            Project-specific structure forwarded unchanged to the subclass
            ``_initialize`` hook.

        resp : array-like of shape (n_samples, n_components), optional
            Caller-supplied responsibilities.  Only honoured when
            ``init_params == 'random'`` (rows are renormalized); the
            'kmeans' branch always recomputes them.
        """
        n_samples, _ = X.shape
        if self.init_params == "kmeans":
            # Hard assignment from a single k-means run: one-hot resp matrix.
            resp = np.zeros((n_samples, self.n_components))
            label = (cluster.KMeans(n_clusters=self.n_components,
                                    n_init=1,
                                    random_state=random_state).fit(X).labels_)
            resp[np.arange(n_samples), label] = 1
        elif self.init_params == "random":
            # Custom behaviour: a caller-supplied resp overrides the random
            # draw; either way rows are normalized to sum to one.
            if resp is None:
                resp = random_state.rand(n_samples, self.n_components)
            resp /= resp.sum(axis=1)[:, np.newaxis]
        else:
            raise ValueError("Unimplemented initialization method '%s'" %
                             self.init_params)
        self._initialize(X, resp, B)

    @abstractmethod
    def _initialize(self, X, resp, B=None):
        """Initialize the model parameters of the derived class.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        resp : array-like of shape (n_samples, n_components)

        B : optional project-specific structure (see ``fit_predict``).
        """
        pass

    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.

        The method fits the model ``n_init`` times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for ``max_iter``
        times until the change of likelihood or lower bound is less than
        ``tol``, otherwise, a ``ConvergenceWarning`` is raised.

        If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
        initialization is performed upon the first call. Upon consecutive
        calls, training starts where it left off.

        NOTE(review): ``fit`` does not forward the custom ``B``/``resp``
        arguments; call ``fit_predict(X, B=..., resp=...)`` directly when
        those are needed.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            The fitted mixture.
        """
        self.fit_predict(X, y)
        return self

    def fit_predict(self, X, y=None, B=None, resp=None):
        """Estimate model parameters using X and predict the labels for X.

        The method fits the model n_init times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for `max_iter`
        times until the change of likelihood or lower bound is less than
        `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is
        raised. After fitting, it predicts the most probable label for the
        input data points.

        .. versionadded:: 0.20

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        B : optional
            Project-specific structure, forwarded to ``_initialize_parameters``
            and ``_m_step``.

        resp : array-like of shape (n_samples, n_components), optional
            Initial responsibilities, forwarded to ``_initialize_parameters``.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        X = self._validate_data(X,
                                dtype=[np.float64, np.float32],
                                ensure_min_samples=2)
        if X.shape[0] < self.n_components:
            raise ValueError("Expected n_samples >= n_components "
                             f"but got n_components = {self.n_components}, "
                             f"n_samples = {X.shape[0]}")
        self._check_initial_parameters(X)

        # if we enable warm_start, we will have a unique initialisation
        do_init = not (self.warm_start and hasattr(self, "converged_"))
        n_init = self.n_init if do_init else 1

        max_lower_bound = -np.inf
        self.converged_ = False

        random_state = check_random_state(self.random_state)

        n_samples, _ = X.shape
        for init in range(n_init):
            self._print_verbose_msg_init_beg(init)

            if do_init:
                # B and resp are threaded through to the subclass hooks.
                self._initialize_parameters(X, random_state, B=B, resp=resp)

            lower_bound = -np.inf if do_init else self.lower_bound_

            for n_iter in range(1, self.max_iter + 1):
                prev_lower_bound = lower_bound

                log_prob_norm, log_resp = self._e_step(X)
                # Custom: M-step also receives B.
                self._m_step(X, log_resp, B)
                lower_bound = self._compute_lower_bound(
                    log_resp, log_prob_norm)

                change = lower_bound - prev_lower_bound
                self._print_verbose_msg_iter_end(n_iter, change)

                if abs(change) < self.tol:
                    self.converged_ = True
                    break

            self._print_verbose_msg_init_end(lower_bound)

            # Keep the best run (or the first, when all bounds are -inf).
            if lower_bound > max_lower_bound or max_lower_bound == -np.inf:
                max_lower_bound = lower_bound
                best_params = self._get_parameters()
                best_n_iter = n_iter

        if not self.converged_:
            warnings.warn(
                "Initialization %d did not converge. "
                "Try different init parameters, "
                "or increase max_iter, tol "
                "or check for degenerate data." % (init + 1),
                ConvergenceWarning,
            )

        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        self.lower_bound_ = max_lower_bound

        # Always do a final e-step to guarantee that the labels returned by
        # fit_predict(X) are always consistent with fit(X).predict(X)
        # for any value of max_iter and tol (and any random_state).
        _, log_resp = self._e_step(X)

        return log_resp.argmax(axis=1)

    def _e_step(self, X):
        """E step.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob_norm : float
            Mean of the logarithms of the probabilities of each sample in X

        log_responsibility : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), log_resp

    @abstractmethod
    def _m_step(self, X, log_resp, B=None):
        """M step.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        log_resp : array-like of shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.

        B : optional project-specific structure (see ``fit_predict``).
        """
        pass

    @abstractmethod
    def _get_parameters(self):
        pass

    @abstractmethod
    def _set_parameters(self, params):
        pass

    def score_samples(self, X):
        """Compute the log-likelihood of each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        log_prob : array, shape (n_samples,)
            Log-likelihood of each sample in `X` under the current model.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)

        return logsumexp(self._estimate_weighted_log_prob(X), axis=1)

    def score(self, X, y=None):
        """Compute the per-sample average log-likelihood of the given data X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_dimensions)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        log_likelihood : float
            Log-likelihood of `X` under the Gaussian mixture model.
        """
        return self.score_samples(X).mean()

    def predict(self, X):
        """Predict the labels for the data samples in X using trained model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        return self._estimate_weighted_log_prob(X).argmax(axis=1)

    def predict_proba(self, X):
        """Evaluate the components' density for each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        resp : array, shape (n_samples, n_components)
            Density of each Gaussian component for each sample in X.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        _, log_resp = self._estimate_log_prob_resp(X)
        return np.exp(log_resp)

    def sample(self, n_samples=1):
        """Generate random samples from the fitted Gaussian distribution.

        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.

        Returns
        -------
        X : array, shape (n_samples, n_features)
            Randomly generated sample.

        y : array, shape (nsamples,)
            Component labels.
        """
        check_is_fitted(self)

        if n_samples < 1:
            # NOTE(review): the message interpolates n_components, not
            # n_samples (quirk inherited from upstream scikit-learn).
            raise ValueError(
                "Invalid value for 'n_samples': %d . The sampling requires at "
                "least one sample." % (self.n_components))

        _, n_features = self.means_.shape
        rng = check_random_state(self.random_state)
        # Draw how many samples come from each component.
        n_samples_comp = rng.multinomial(n_samples, self.weights_)

        if self.covariance_type == "full":
            X = np.vstack([
                rng.multivariate_normal(mean, covariance, int(sample))
                for (mean, covariance, sample
                     ) in zip(self.means_, self.covariances_, n_samples_comp)
            ])
        elif self.covariance_type == "tied":
            X = np.vstack([
                rng.multivariate_normal(mean, self.covariances_, int(sample))
                for (mean, sample) in zip(self.means_, n_samples_comp)
            ])
        else:
            # 'diag' and 'spherical': independent per-feature scaling.
            X = np.vstack([
                mean + rng.randn(sample, n_features) * np.sqrt(covariance)
                for (mean, covariance, sample
                     ) in zip(self.means_, self.covariances_, n_samples_comp)
            ])

        y = np.concatenate([
            np.full(sample, j, dtype=int)
            for j, sample in enumerate(n_samples_comp)
        ])

        return (X, y)

    def _estimate_weighted_log_prob(self, X):
        """Estimate the weighted log-probabilities, log P(X | Z) + log weights.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        weighted_log_prob : array, shape (n_samples, n_component)
        """
        return self._estimate_log_prob(X) + self._estimate_log_weights()

    @abstractmethod
    def _estimate_log_weights(self):
        """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.

        Returns
        -------
        log_weight : array, shape (n_components, )
        """
        pass

    @abstractmethod
    def _estimate_log_prob(self, X):
        """Estimate the log-probabilities log P(X | Z).

        Compute the log-probabilities per each component for each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob : array, shape (n_samples, n_component)
        """
        pass

    def _estimate_log_prob_resp(self, X):
        """Estimate log probabilities and responsibilities for each sample.

        Compute the log probabilities, weighted log probabilities per
        component and responsibilities for each sample in X with respect to
        the current state of the model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob_norm : array, shape (n_samples,)
            log p(X)

        log_responsibilities : array, shape (n_samples, n_components)
            logarithm of the responsibilities
        """
        weighted_log_prob = self._estimate_weighted_log_prob(X)
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under="ignore"):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        return log_prob_norm, log_resp

    def _print_verbose_msg_init_beg(self, n_init):
        """Print verbose message on initialization."""
        if self.verbose == 1:
            print("Initialization %d" % n_init)
        elif self.verbose >= 2:
            print("Initialization %d" % n_init)
            self._init_prev_time = time()
            self._iter_prev_time = self._init_prev_time

    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        """Print verbose message on initialization."""
        if n_iter % self.verbose_interval == 0:
            if self.verbose == 1:
                print("  Iteration %d" % n_iter)
            elif self.verbose >= 2:
                cur_time = time()
                print("  Iteration %d\t time lapse %.5fs\t ll change %.5f" %
                      (n_iter, cur_time - self._iter_prev_time, diff_ll))
                self._iter_prev_time = cur_time

    def _print_verbose_msg_init_end(self, ll):
        """Print verbose message on the end of iteration."""
        if self.verbose == 1:
            print("Initialization converged: %s" % self.converged_)
        elif self.verbose >= 2:
            print("Initialization converged: %s\t time lapse %.5fs\t ll %.5f" %
                  (self.converged_, time() - self._init_prev_time, ll))
# + cellView="form" id="xMiE1A6KJUXH"
#@title sklearn/mixture/_gaussian_mixture.py
"""Gaussian Mixture Model."""
# sklearn/mixture/_gaussian_mixture.py
# Author: <NAME> <<EMAIL>>
# Modified by <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
# from ._base import BaseMixture, _check_shape these come from cell above
from sklearn.utils import check_array
from sklearn.utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
    """Check the user provided 'weights'.

    Parameters
    ----------
    weights : array-like of shape (n_components,)
        The proportions of components of each mixture.

    n_components : int
        Number of components.

    Returns
    -------
    weights : array, shape (n_components,)

    Raises
    ------
    ValueError
        If the shape is wrong, any weight lies outside [0, 1], or the
        weights do not sum to 1.
    """
    weights = check_array(weights,
                          dtype=[np.float64, np.float32],
                          ensure_2d=False)
    _check_shape(weights, (n_components, ), "weights")

    # check range
    if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):
        # BUG FIX: the original interpolated (np.min, np.max), so the value
        # labelled "max" was actually the minimum and vice versa.
        raise ValueError("The parameter 'weights' should be in the range "
                         "[0, 1], but got max value %.5f, min value %.5f" %
                         (np.max(weights), np.min(weights)))

    # check normalization
    if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0):
        raise ValueError(
            "The parameter 'weights' should be normalized, but got sum(weights) = %.5f"
            % np.sum(weights))
    return weights
def _check_means(means, n_components, n_features):
    """Validate the provided 'means'.

    Parameters
    ----------
    means : array-like of shape (n_components, n_features)
        The centers of the current components.

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    means : array, (n_components, n_features)
    """
    validated = check_array(means,
                            dtype=[np.float64, np.float32],
                            ensure_2d=False)
    _check_shape(validated, (n_components, n_features), "means")
    return validated
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T)
and np.all(linalg.eigvalsh(precision) > 0.0)):
raise ValueError(
"'%s precision' should be symmetric, positive-definite" %
covariance_type)
def _check_precisions_full(precisions, covariance_type):
    """Check that every precision matrix is symmetric, positive-definite."""
    for matrix in precisions:
        _check_precision_matrix(matrix, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
    """Validate user provided precisions.

    Parameters
    ----------
    precisions : array-like
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)

    covariance_type : str

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    precisions : array
    """
    precisions = check_array(
        precisions,
        dtype=[np.float64, np.float32],
        ensure_2d=False,
        allow_nd=covariance_type == "full",
    )

    expected_shape = {
        "full": (n_components, n_features, n_features),
        "tied": (n_features, n_features),
        "diag": (n_components, n_features),
        "spherical": (n_components, ),
    }[covariance_type]
    _check_shape(precisions, expected_shape,
                 "%s precision" % covariance_type)

    # Dispatch to the structural validator for this covariance type.
    # (Local name deliberately does not shadow this function's own name.)
    validators = {
        "full": _check_precisions_full,
        "tied": _check_precision_matrix,
        "diag": _check_precision_positivity,
        "spherical": _check_precision_positivity,
    }
    validators[covariance_type](precisions, covariance_type)
    return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
responsibilities : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means**2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
    """Estimate the spherical variance values.

    Parameters
    ----------
    resp : array-like of shape (n_samples, n_components)

    X : array-like of shape (n_samples, n_features)

    nk : array-like of shape (n_components,)

    means : array-like of shape (n_components, n_features)

    reg_covar : float

    Returns
    -------
    variances : array, shape (n_components,)
        The variance values of each components.
    """
    # A spherical variance is the mean of the per-feature diagonal variances.
    per_feature = _estimate_gaussian_covariances_diag(resp, X, nk, means,
                                                      reg_covar)
    return per_feature.mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type, B=None):
    """Estimate the Gaussian distribution parameters.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data array.

    resp : array-like of shape (n_samples, n_components)
        The responsibilities for each data sample in X.

    reg_covar : float
        The regularization added to the diagonal of the covariance matrices.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.

    B : sequence of length n_components
        Project-specific marker structure: B[i] is assumed to be a
        collection of marker-gene (feature) indices for component i --
        TODO confirm against the callers.  Despite the ``None`` default it
        is required here (``set(B[i])`` is evaluated unconditionally).

    Returns
    -------
    nk : array-like of shape (n_components,)
        The numbers of data samples in the current components.

    means : array-like of shape (n_components, n_features)
        The centers of the current components; non-marker entries are
        clamped to a small constant floor (see below).

    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.
    """
    # Tiny epsilon keeps an empty component from producing 0/0 below.
    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    # Project-specific modification: for each component, the mean of every
    # feature that is NOT one of its markers (per B) is clamped to a small
    # constant floor, so only marker genes carry component-specific signal.
    # CLEANUP: removed two dead computations from the original -- a
    # `marker_gene_indices` built from np.where(B[i]) that was immediately
    # overwritten, and per-component `ct_mins` (min marker mean / f) that
    # was computed but never used because the floor is hard-coded.
    marker_gene_indices = [set(B[i]) for i in range(means.shape[0])]
    floor = 1e-3  # earlier experiments used min(marker means) / f instead
    for comp, markers in enumerate(marker_gene_indices):
        for feat in range(means.shape[1]):
            if feat not in markers:
                means[comp][feat] = floor
    covariances = {
        "full": _estimate_gaussian_covariances_full,
        "tied": _estimate_gaussian_covariances_tied,
        "diag": _estimate_gaussian_covariances_diag,
        "spherical": _estimate_gaussian_covariances_spherical,
    }[covariance_type](resp, X, nk, means, reg_covar)
    return nk, means, covariances
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
The shape depends of the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
components. The shape depends of the covariance_type.
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar.")
if covariance_type == "full":
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
elif covariance_type == "tied":
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(covariances)
return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = np.sum(
np.log(matrix_chol.reshape(n_components, -1)[:, ::n_features + 1]),
1)
elif covariance_type == "tied":
log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
elif covariance_type == "diag":
log_det_chol = np.sum(np.log(matrix_chol), axis=1)
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """Estimate the log Gaussian probability.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    means : array-like of shape (n_components, n_features)

    precisions_chol : array-like
        Cholesky decompositions of the precision matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)

    covariance_type : {'full', 'tied', 'diag', 'spherical'}

    Returns
    -------
    log_prob : array, shape (n_samples, n_components)
    """
    n_samples, n_features = X.shape
    n_components, _ = means.shape
    # det(precision_chol) is half of det(precision)
    log_det = _compute_log_det_cholesky(precisions_chol, covariance_type,
                                        n_features)
    if covariance_type == "full":
        log_prob = np.empty((n_samples, n_components))
        for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
            # Squared Mahalanobis distance via the whitened residuals
            # (X - mu) @ prec_chol.
            y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)
    elif covariance_type == "tied":
        # Same whitening, but a single shared triangular factor.
        log_prob = np.empty((n_samples, n_components))
        for k, mu in enumerate(means):
            y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)
    elif covariance_type == "diag":
        # Expanded quadratic form: mu^2*p - 2*x*mu*p + x^2*p, vectorized
        # over all components at once.
        precisions = precisions_chol**2
        log_prob = (np.sum((means**2 * precisions), 1) -
                    2.0 * np.dot(X, (means * precisions).T) +
                    np.dot(X**2, precisions.T))
    elif covariance_type == "spherical":
        # Same expansion with one scalar precision per component.
        precisions = precisions_chol**2
        log_prob = (np.sum(means**2, 1) * precisions -
                    2 * np.dot(X, means.T * precisions) +
                    np.outer(row_norms(X, squared=True), precisions))
    # Assemble the Gaussian log-density: -(d*log(2*pi) + mahalanobis)/2
    # plus the log-determinant of the precision Cholesky factor.
    return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
class ImprovedGaussianMixture(BaseMixture):
"""Gaussian Mixture.
Representation of a Gaussian mixture model probability distribution.
This class allows to estimate the parameters of a Gaussian mixture
distribution.
Read more in the :ref:`User Guide <gmm>`.
.. versionadded:: 0.18
Parameters
----------
n_components : int, default=1
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
String describing the type of covariance parameters to use.
Must be one of:
'full'
each component has its own general covariance matrix
'tied'
all components share the same general covariance matrix
'diag'
each component has its own diagonal covariance matrix
'spherical'
each component has its own single variance
tol : float, default=1e-3
The convergence threshold. EM iterations will stop when the
lower bound average gain is below this threshold.
reg_covar : float, default=1e-6
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, default=100
The number of EM iterations to perform.
n_init : int, default=1
The number of initializations to perform. The best results are kept.
init_params : {'kmeans', 'random'}, default='kmeans'
The method used to initialize the weights, the means and the
precisions.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weights_init : array-like of shape (n_components, ), default=None
The user-provided initial weights.
If it is None, weights are initialized using the `init_params` method.
means_init : array-like of shape (n_components, n_features), default=None
The user-provided initial means,
If it is None, means are initialized using the `init_params` method.
precisions_init : array-like, default=None
The user-provided initial precisions (inverse of the covariance
matrices).
If it is None, precisions are initialized using the 'init_params'
method.
The shape depends on 'covariance_type'::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
random_state : int, RandomState instance or None, default=None
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
In that case, 'n_init' is ignored and only a single initialization
occurs upon the first call.
See :term:`the Glossary <warm_start>`.
verbose : int, default=0
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default=10
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like of shape (n_components,)
The weights of each mixture components.
means_ : array-like of shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of EM to reach the convergence.
lower_bound_ : float
Lower bound value on the log-likelihood (of the training data with
respect to the model) of the best fit of EM.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BayesianGaussianMixture : Gaussian mixture model fit with a variational
inference.
Examples
--------
>>> import numpy as np
>>> from sklearn.mixture import GaussianMixture
>>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
>>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
>>> gm.means_
array([[10., 2.],
[ 1., 2.]])
>>> gm.predict([[0, 0], [12, 3]])
array([1, 0])
"""
    def __init__(
        self,
        n_components=1,
        *,
        covariance_type="full",
        tol=1e-3,
        reg_covar=1e-6,
        max_iter=100,
        n_init=1,
        init_params="kmeans",
        weights_init=None,
        means_init=None,
        precisions_init=None,
        random_state=None,
        warm_start=False,
        verbose=0,
        verbose_interval=10,
    ):
        """Store Gaussian-mixture hyperparameters.

        Generic EM settings are forwarded unchanged to the base-class
        constructor; only the Gaussian-specific options (covariance
        structure and optional user-supplied initial parameters) are
        kept directly on this instance.
        """
        super().__init__(
            n_components=n_components,
            tol=tol,
            reg_covar=reg_covar,
            max_iter=max_iter,
            n_init=n_init,
            init_params=init_params,
            random_state=random_state,
            warm_start=warm_start,
            verbose=verbose,
            verbose_interval=verbose_interval,
        )
        # Gaussian-specific settings, validated later in _check_parameters.
        self.covariance_type = covariance_type
        self.weights_init = weights_init
        self.means_init = means_init
        self.precisions_init = precisions_init
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.covariance_type not in ["spherical", "tied", "diag", "full"]:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']" %
self.covariance_type)
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init,
self.n_components)
if self.means_init is not None:
self.means_init = _check_means(self.means_init, self.n_components,
n_features)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(
self.precisions_init,
self.covariance_type,
self.n_components,
n_features,
)
def _initialize(self, X, resp, B=None):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type, B=B)
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type)
elif self.covariance_type == "full":
self.precisions_cholesky_ = np.array([
linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init
])
elif self.covariance_type == "tied":
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
def _m_step(self, X, log_resp, B=None):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(
X, np.exp(log_resp), self.reg_covar, self.covariance_type, B=B)
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
    def _estimate_log_prob(self, X):
        """Return the per-sample, per-component Gaussian log-density of X."""
        return _estimate_log_gaussian_prob(X, self.means_,
                                           self.precisions_cholesky_,
                                           self.covariance_type)
    def _estimate_log_weights(self):
        """Return the log of the mixing weights (used by the E step)."""
        return np.log(self.weights_)
    def _compute_lower_bound(self, _, log_prob_norm):
        """EM lower bound; for a Gaussian mixture it is the mean log-likelihood."""
        return log_prob_norm
    def _get_parameters(self):
        """Return (weights_, means_, covariances_, precisions_cholesky_).

        Used by the base class to checkpoint/restore the best EM run.
        """
        return (
            self.weights_,
            self.means_,
            self.covariances_,
            self.precisions_cholesky_,
        )
def _set_parameters(self, params):
(
self.weights_,
self.means_,
self.covariances_,
self.precisions_cholesky_,
) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == "full":
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == "tied":
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_**2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == "full":
cov_params = self.n_components * n_features * (n_features +
1) / 2.0
elif self.covariance_type == "diag":
cov_params = self.n_components * n_features
elif self.covariance_type == "tied":
cov_params = n_features * (n_features + 1) / 2.0
elif self.covariance_type == "spherical":
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
The input samples.
Returns
-------
bic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log(
X.shape[0])
def aic(self, X):
"""Akaike information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
The input samples.
Returns
-------
aic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
# + id="OvjgUAvbKyni" cellView="form"
#@title import
import os
from ipfn import ipfn
import matplotlib.pyplot as plt
from sklearn.metrics import rand_score
from mpl_toolkits.axes_grid1 import make_axes_locatable
import json
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from collections import defaultdict
from scipy.io import mmread, mmwrite
from scipy.sparse import csr_matrix
from sklearn.neighbors import KDTree
from scipy.stats import entropy
from itertools import combinations
def nd(arr):
    """Coerce *arr* to a flat 1-D numpy array."""
    flat = np.asarray(arr)
    return flat.ravel()
def yex(ax):
    """Draw a y = x reference line on *ax* and force equal, square limits."""
    both_axes = [ax.get_xlim(), ax.get_ylim()]
    lims = [np.min(both_axes), np.max(both_axes)]
    # Plot the identity line underneath the data (low zorder).
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    ax.set_aspect('equal')
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    return ax
from upsetplot import from_memberships, plot as upsetplot, from_contents
fsize=20
plt.rcParams.update({'font.size': fsize})
# %config InlineBackend.figure_format = 'retina'
# + id="_NEy7vOtozRT" cellView="form"
#@title Sina's functions
def read_int_list(fname, lst=None):
    """Read one integer per line from *fname*, appending to *lst*.

    Parameters
    ----------
    fname : str
        Path to a text file with one integer per line.
    lst : list or None
        Destination list, extended in place. A fresh list is created when
        None. (The original used the mutable default ``lst=[]``, which is
        shared across calls and silently accumulates.)

    Returns
    -------
    list
        The populated list (also mutated in place when supplied).
    """
    if lst is None:
        lst = []
    with open(fname) as f:
        for line in f:
            lst.append(int(line.strip()))
    return lst
def read_str_list(fname, lst=None):
    """Read stripped lines from *fname*, appending to *lst*.

    Parameters
    ----------
    fname : str
        Path to a text file.
    lst : list or None
        Destination list, extended in place. A fresh list is created when
        None. (The original default was the ``list`` *class itself*, so a
        call without *lst* would raise TypeError on ``list.append``.)

    Returns
    -------
    list
        The populated list (also mutated in place when supplied).
    """
    if lst is None:
        lst = []
    with open(fname, 'r') as f:
        for line in f:
            lst.append(line.strip())
    return lst
def map_dict_list_keys_values(dct, k_lst, v_lst, nd=None):
    """Rename both keys and list values of *dct* through lookup lists.

    Parameters
    ----------
    dct : dict
        Maps an integer key to a list of integer indices.
    k_lst : sequence
        ``k_lst[k]`` is the name for key ``k``.
    v_lst : sequence
        ``v_lst[i]`` is the name for value index ``i``.
    nd : mapping or None
        Destination mapping, filled in place. A fresh ``defaultdict(list)``
        is created when None. (The original used a shared mutable default,
        which accumulates across calls; the parameter also shadows the
        module-level ``nd`` helper, so the name is kept only for
        backward compatibility.)

    Returns
    -------
    mapping
        The populated mapping.
    """
    if nd is None:
        nd = defaultdict(list)
    for k, v in dct.items():
        nd[k_lst[k]] = [v_lst[i] for i in v]
    return nd
def map_dict_list_keys(dct, k_lst, v_lst, nd=None):
    """Rename only the keys of *dct* through ``k_lst``; values are kept.

    ``v_lst`` is unused but retained for signature compatibility with
    ``map_dict_list_keys_values``. A fresh ``defaultdict(list)`` replaces
    the original shared mutable default when *nd* is None.

    Returns
    -------
    mapping
        The populated mapping (also mutated in place when supplied).
    """
    if nd is None:
        nd = defaultdict(list)
    for k, v in dct.items():
        nd[k_lst[k]] = v
    return nd
def map_dict_list_values(dct, k_lst, v_lst, nd=None):
    """Rename only the list values of *dct* through ``v_lst``; keys are kept.

    ``k_lst`` is unused but retained for signature compatibility with
    ``map_dict_list_keys_values``. A fresh ``defaultdict(list)`` replaces
    the original shared mutable default when *nd* is None.

    Returns
    -------
    mapping
        The populated mapping (also mutated in place when supplied).
    """
    if nd is None:
        nd = defaultdict(list)
    for k, v in dct.items():
        nd[k] = [v_lst[i] for i in v]
    return nd
def read_markers_ec(fname, markers_ec=None):
    """Parse a markers equivalence-class file into ``{ct_id: [gene_ids]}``.

    Each line is ``<ct_id>\\t<gene_id>,<gene_id>,...`` with integer ids.

    Parameters
    ----------
    fname : str
        Path to the markers.ec file.
    markers_ec : mapping or None
        Destination mapping, filled in place. A fresh ``defaultdict(list)``
        is created when None. (The original used a shared mutable default.)

    Returns
    -------
    mapping
        The populated mapping.
    """
    if markers_ec is None:
        markers_ec = defaultdict(list)
    with open(fname, 'r') as f:
        for line in f:
            ct_id, gene_ids = line.strip().split('\t')
            markers_ec[int(ct_id)] = [int(i) for i in gene_ids.split(',')]
    return markers_ec
def sanitize_mtx(mtx):
    """Return boolean (row_mask, col_mask) keeping rows/columns with signal.

    A cell (row) is kept when it has a positive total count and at least
    one detected gene; a gene (column) is kept when it has a positive
    total count and is detected in at least one cell.
    """
    has_cell_total = mtx.sum(1) > 0        # total count per cell
    has_gene_total = mtx.sum(0) > 0        # total count per gene
    cell_has_genes = (mtx > 0).sum(1) > 0  # n detected genes per cell
    gene_has_cells = (mtx > 0).sum(0) > 0  # n detecting cells per gene
    row_mask = has_cell_total & cell_has_genes
    col_mask = has_gene_total & gene_has_cells
    return (row_mask, col_mask)
def drop_markers(markers_ec, drop_ids):
    """Remove dropped gene ids from every marker list, in place.

    Surviving ids are shifted down by the number of dropped ids smaller
    than them, so they stay valid indices into the compacted gene axis.

    Parameters
    ----------
    markers_ec : dict
        Maps cluster id -> list of gene indices; each list is mutated
        in place (list identity is preserved).
    drop_ids : set
        Gene indices that were removed from the matrix.
    """
    if len(drop_ids) == 0:
        return
    for gene_list in markers_ec.values():
        reindexed = []
        for gene in gene_list:
            if gene in drop_ids:
                continue
            shift = sum(1 for dropped in drop_ids if dropped < gene)
            reindexed.append(gene - shift)
        # Slice-assign so callers holding a reference see the update.
        gene_list[:] = reindexed
# testing data
# drop_ids = set([2, 3, 34, 42])
# truth = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
# 1: [7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
# 2: [19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
# 3: [0, 3, 6, 7, 24, 29, 30, 31, 32, 33],
# 4: [0, 4, 5, 6, 7, 18, 23, 24, 30, 31, 34],
# 5: [2, 22, 23, 24, 30, 35, 36, 37, 38, 39],
# 6: [0, 3, 4, 6, 7, 24, 30, 31, 32, 40]}
# drop_markers(markers_ec, set(drop_genes))
# markers_ec == truth
def do_ipf(mtx):
    """Rebalance a count matrix with iterative proportional fitting.

    Row marginals are forced to be uniform (every cell gets the same
    total), while column marginals stay proportional to the observed
    per-tag totals. The rebalanced matrix is rescaled to the original
    grand total and truncated to int.
    """
    n_rows, _ = mtx.shape
    mass = 1
    aggregates = [
        np.ones(n_rows) * mass / n_rows,  # rows: uniform per cell
        nd(mtx.sum(0)) / mtx.sum(),       # cols: proportional per tag
    ]
    dimensions = [[0], [1]]
    fitter = ipfn.ipfn(mtx, aggregates, dimensions, max_iteration=10000)
    fitted = fitter.iteration()
    rescaled = fitted * mtx.sum()
    return rescaled.astype(int)
def get_marker_centroids(X, markers_ec, method="mean"):
    """Build one centroid per cluster from its marker-gene columns of X.

    Non-marker entries are filled with a small constant (1e-5); marker
    entries get the column-wise mean (default) or max over all samples.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    markers_ec : dict
        Maps cluster index -> list of marker column indices.
    method : str
        "max" for the column maximum; anything else uses the mean.
    """
    n_clusters = len(markers_ec)
    n_features = X.shape[1]
    centroids = np.ones((n_clusters, n_features)) * 1e-5
    for cluster, marker_cols in markers_ec.items():
        marker_vals = X[:, marker_cols]
        if method == 'max':
            centroids[cluster][marker_cols] = marker_vals.max(0)
        else:
            centroids[cluster][marker_cols] = marker_vals.mean(0)
    return centroids
def get_centroids(X, z):
    """Return the mean row of X for each distinct label in z.

    Rows of the result follow the sorted order of the unique labels.
    """
    labels = np.sort(np.unique(z))
    centroids = np.ones((labels.shape[0], X.shape[1]))
    for row, label in enumerate(labels):
        members = np.where(z == label)[0]
        centroids[row] = X[members].mean(0)
    return centroids
# + id="8fCbXVL3JYEh"
sample = "293T"
observation = "obs6"
base_data = f"azucar/analysis/{sample}/{observation}/out"
base_mark = f"azucar/analysis/{sample}/{observation}/assign"
markers_fn = os.path.join(base_mark, "markers.txt")
matrix_fn = os.path.join(base_data, "matrix.mtx")
genes_fn = os.path.join(base_data, "genes.txt")
barcodes_fn = os.path.join(base_data, "barcodes.txt")
labels_fn = "./labels.txt"
# labels = pd.read_csv(labels_fn, sep="\t", header=None, names=["celltype"])
# z = labels["celltype"].astype("category").cat.codes.values
# + id="Hyk7X06YKxeq"
# !gunzip $base_data/*.gz
# + id="x4Vr7YayK3om"
# index the markers -> markers.ec marker_genes.txt groups.txt
# !./index.py $markers_fn ./
# get the gene ids -> select.txt
# !./select.py $markers_fn $genes_fn ./
# + id="m2-KfAw0sIX0"
# Load the index/selection artifacts produced by the cells above, then
# sanitize the count matrix and rebalance it with IPF.
# column indices to select from gene matrix
sel = []
read_int_list("select.txt", sel)
# the group names
groups = []
read_str_list("groups.txt", groups)
# the barcode names
barcodes = []
read_str_list(barcodes_fn, barcodes)
# the gene names
genes = []
read_str_list(genes_fn, genes)
# the marker gene names that are being selected for
# NOTE: sel is the index "vector" for marker_genes
marker_genes = []
read_str_list("marker_genes.txt", marker_genes)
# markers.ec, maps groups (indices) to marker genes (indices)
markers_ec = defaultdict(list)
read_markers_ec("markers.ec", markers_ec)
# named groups to named marker genes (value version of markers_ec)
markers = defaultdict(list)
map_dict_list_keys_values(markers_ec, groups, marker_genes, markers)
# read in matrix and select columns and write back to disc
M = mmread(matrix_fn).toarray()
# sanitize gene count matrix (remove cells / genes) and remove genes from marker_ec
row_mask, col_mask = sanitize_mtx(M)
barcodes = np.array(barcodes)[row_mask]
# gene ids removed by the column mask must also be dropped (and
# re-indexed) inside the marker equivalence classes
drop_genes = np.arange(M.shape[1])[~col_mask]
drop_markers(markers_ec, set(drop_genes))
mtx = M[row_mask][:,col_mask].astype(int)
# IPF on a copy: do_ipf reads the matrix it is given
mtx_ipf = do_ipf(mtx.copy())
mmwrite("matrix_select_ipf.mtx", csr_matrix(mtx_ipf[:,sel]))
# the 'dbco' tag column is written out separately for downstream use
dbco = mtx_ipf[:,np.where(np.array(genes) == 'dbco')[0][0]]
with open("dbco.txt", 'w') as f:
    for v in dbco:
        f.write(f'{v}\n')
# + colab={"base_uri": "https://localhost:8080/"} id="_e_Z_V_XqqVh" outputId="4f0dc5d5-90d1-487a-8f95-8275fa9f93c3"
# read in gene count matrix
G = mmread("matrix_select_ipf.mtx").toarray()
n_clusters = len(markers_ec.keys())
n_samples, n_features = G.shape
print(n_clusters, *G.shape, sep=", ")
# get the centroids for the existing data
data = {
    "X": {
        "raw_log1p": np.log1p(G),
    }
}
method = "raw_log1p"
# initialize the clusters from mean-centered data (an alternative is to
# z-score the features instead of only centering them)
X_mean = data["X"][method].mean(0)
X_init = (data["X"][method] - X_mean)
centroids_init = get_marker_centroids(X_init, markers_ec, "max")
# hard-assign every cell to its nearest marker centroid
tree = KDTree(centroids_init, metric="euclidean")
nearest_dist, nearest_ind = tree.query(X_init, k=1)
# assign cells to clusters; with p = 1 the responsibilities are one-hot
p = 1
resp = np.ones((n_samples, n_clusters)) * (1 - p) / (n_clusters - 1)
resp[np.arange(n_samples), nearest_ind.flatten()] = p
# initialize params; the eps term guards against division by zero for
# clusters that received no cells
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
# then once we have the means, add the previously subtracted means back
means_init = np.dot(resp.T, X_init) / nk[:, np.newaxis]
means_init += X_mean
# alternative to uniform weights is nk / n_samples (using the new assignments)
uniform_weights = np.array([1. / n_clusters] * n_clusters)
# alternative is to compute precisions by first doing M-step to get gaus params
identity_precisions = np.repeat(
    np.array([np.eye(data['X'][method].shape[1])]), n_clusters, 0)
gmm_params = {
    "n_components": n_clusters,
    "means_init": None,  # to be added
    "weights_init": None,  # to be added
    "precisions_init": None,  # to be added
    "random_state": 0,
    "reg_covar": 1e-8,
    "verbose": 2,
    "n_init": 1,
    "max_iter": 1000,
    "tol": 1e-3,
    "init_params": "random"
}
params = {
    **gmm_params,
    "means_init": means_init,  # centroids,
    "weights_init": uniform_weights,
    "precisions_init": identity_precisions
}
# + colab={"base_uri": "https://localhost:8080/"} id="CEs_4dHuGPVH" outputId="b2f2cdc4-4c36-43a2-c407-c1d95fa95465"
gmm = ImprovedGaussianMixture(**params)
print(method, json.dumps(params, indent=4, default=str))
# fit with the marker structure B so the M step can use the equivalence classes
labels = gmm.fit_predict(data["X"][method], B=markers_ec)
means = gmm.means_
prob = gmm.predict_proba(data["X"][method])
# per-cell assignment uncertainty from the posterior probabilities
ent = entropy(prob, axis=1)
# make df
df = pd.DataFrame(G, columns=[f"{i}_ipf" for i in marker_genes])
df["dbco_ipf"] = dbco
# original counts
for idx, v in enumerate(mtx.T):
    df[f"{genes[idx]}"] = v
df["label_id"] = labels
df["label"] = df["label_id"].map({i:groups[i] for i in range(len(groups))})
df["ent"] = ent
df.index = barcodes
# NOTE(review): these columns store posterior probabilities from
# predict_proba, not Mahalanobis distances — confirm the naming intent
for idx, p in enumerate(prob.T):
    df[f"mahalanobis_{idx}"] = p
# with open(labels_fn, 'w') as f:
#     for bc, l in zip(barcodes,labels):
#         f.write(f'{bc}\t{l}\n')
# + colab={"base_uri": "https://localhost:8080/", "height": 661} id="6H6k6NAJS9Wv" outputId="f207ca73-6a8f-4105-bc3c-90ed85073226"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="hMforUUBwLA2" outputId="c1feab04-cfa9-45ea-fcd3-0975427cb09a"
fig, ax = plt.subplots(figsize=(5,5))
adj_ent = -np.log10(ent)
x = np.sort(adj_ent)[::-1]
y = np.arange(ent.shape[0])
ax.scatter(x,y)
ax.set(**{
"yscale": "symlog",
"xlabel": "-log10(entropy)",
"ylabel": "Cell rank"
})
elim = 5
ind = np.where(x > elim)[0][-1]
ecutoff = x[ind]
ax.axvline(x=ecutoff, color="k")
ax.axhline(y=ind, color="k")
fig.show()
# + colab={"base_uri": "https://localhost:8080/"} id="NUpPTP9kwTX8" outputId="08315bae-529a-40e9-f54c-4c5f20f007e4"
ind, 10**(-elim)
# + colab={"base_uri": "https://localhost:8080/"} id="AdNwwD-cI6Cv" outputId="5538f66d-ccb6-4b09-b9e4-31d5fa01fa9f"
df.query(f"ent < {10**(-elim)}")["label"].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="DJlbo2mEGR7F" outputId="7e87300d-5fba-4bee-d71e-9e6acb25d5c9"
df.query(f"ent < {10**(-elim)}").groupby("label")[marker_genes].mean().astype(int)
# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="KrraAoRstGGS" outputId="1314e9b9-ddef-4bb8-88f1-704e6da2fdac"
norm = df.query(f"ent < {10**(-elim)}").groupby("label")[marker_genes].mean().astype(int)
(norm.div(norm.sum(1), axis="rows")*100).astype(int)
# + colab={"base_uri": "https://localhost:8080/"} id="W9CYGzKJHrEu" outputId="31d5f1fb-b95b-43b1-835c-17a894de989e"
print(markers_ec)
print(groups)
print(marker_genes)
print(markers)
# + colab={"base_uri": "https://localhost:8080/"} id="a-_O71AfIExw" outputId="41110300-d695-4321-e490-82c2fe53544f"
# !cat $markers_fn
# + id="NMNaGHc7FbrJ"
df.to_csv('assignments.txt.gz', sep='\t', compression='gzip')
# + colab={"base_uri": "https://localhost:8080/"} id="7Dz0-6reFffp" outputId="b39a7fb9-3e78-4c88-ab2d-ed6aac162432"
M.sum(0) / M.sum() * 100
# + colab={"base_uri": "https://localhost:8080/"} id="DMtdPpqVJH4K" outputId="d1f38640-169e-402c-851a-a2d576f8e549"
genes
# + id="ic2wdU7oJKzV"
| analysis/293T/obs6/assign.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import shapely
import os
import geopandas as gpd
# +
# Census polygons used to decide whether a data point lies in the study area.
demographics = gpd.read_file('../KMeans/census.geoJSON')
for folder in os.listdir():
    if os.path.isdir(folder):
        for file in os.listdir(folder):
            if file.endswith('.csv'):
                #print('--------------')
                df = pd.read_csv(folder +'/' + file, sep=';')
                for i in range(len(df)):
                    # Strip surrounding parentheses and parse two floats.
                    value_str = df.loc[i,'Location']
                    x,y = map(float,value_str[1:-1].split(','))
                    # The (y, x) swap presumably converts "(lat, lon)" to
                    # Point(lon, lat) — TODO confirm the Location format.
                    point = shapely.geometry.Point(y,x)
                    # Drop rows falling outside every census polygon.
                    if not np.any(demographics.loc[:,'geometry'].apply(point.intersects)):
                        # print(df.loc[i,:])
                        df = df.drop(i)
                # NOTE(review): os.remove is redundant (to_csv overwrites),
                # and to_csv without index=False appends an extra index
                # column each time this cell is re-run — confirm intent.
                os.remove(folder + '/' + file)
                df.to_csv(folder + '/' + file, sep=';')
                print(file)
| individual_work/marielle/data/Data Point Detection and Clean up.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.9 64-bit (''iguanas_os_dev'': venv)'
# name: python3
# ---
# # Rule Generator (Decision Tree algorithm) Example
# The Rule Generator (Decision Tree algorithm) is used to create rules based on a labelled dataset. This algorithm generates rules by extracting the highest-performing branches from a tree ensemble model.
# ## Requirements
# To run, you'll need the following:
#
# * A labelled, processed dataset (nulls imputed, categorical features encoded).
# ----
# ## Import packages
# +
from iguanas.rule_generation import RuleGeneratorDT
from iguanas.metrics.classification import FScore
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
# -
# ## Read in data
# Let's read in some labelled, processed dummy data.
X_train = pd.read_csv(
'dummy_data/X_train.csv',
index_col='eid'
)
y_train = pd.read_csv(
'dummy_data/y_train.csv',
index_col='eid'
).squeeze()
X_test = pd.read_csv(
'dummy_data/X_test.csv',
index_col='eid'
)
y_test = pd.read_csv(
'dummy_data/y_test.csv',
index_col='eid'
).squeeze()
# ----
# ## Generate rules
# ### Set up class parameters
# Now we can set our class parameters for the Rule Generator. Here, we're using the F1 score as the main rule performance metric (you can choose a different function from the `metrics.classification` module or create your own).
#
# **Note that if you're using the FScore, Precision or Recall score as the optimisation function, use the *FScore*, *Precision* or *Recall* classes in the *metrics.classification* module rather than the same functions from Sklearn's *metrics* module, as the former are ~100 times faster on larger datasets.**
#
# **Please see the class docstring for more information on each parameter.**
fs = FScore(beta=1)
params = {
'n_total_conditions': 4,
'metric': fs.fit,
'tree_ensemble': RandomForestClassifier(n_estimators=100, random_state=0),
'precision_threshold': 0.5,
'num_cores': 1,
'target_feat_corr_types': 'Infer',
'verbose': 1
}
# ### Instantiate class and run fit method
# Once the parameters have been set, we can run the `fit` method to generate rules.
rg = RuleGeneratorDT(**params)
X_rules = rg.fit(
X=X_train,
y=y_train,
sample_weight=None
)
# ### Outputs
# The `fit` method returns a dataframe giving the binary columns of the generated rules as applied to the training dataset. See the `Attributes` section in the class docstring for a description of each attribute generated:
X_rules.head()
# ----
# + [markdown] tags=[]
# ## Apply rules to a separate dataset
# -
# Use the `transform` method to apply the generated rules to a separate dataset.
X_rules_test = rg.transform(X=X_test)
# ### Outputs
# The `transform` method returns a dataframe giving the binary columns of the rules as applied to the given dataset:
X_rules_test.head()
# ----
| iguanas/rule_generation/examples/rule_generator_dt_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import cv2
# Test images 4.2.01 ("Liquid Drop") and 4.2.03 (mandrill) — presumably
# the USC-SIPI misc image set; confirm the local dataset contents.
path = 'd:/Dataset/'
imgpath1 = path + '4.2.01.tiff'
imgpath2 = path + '4.2.03.tiff'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
# OpenCV loads BGR; convert to RGB for matplotlib display.
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# Element-wise product and quotient of the two images.
# NOTE(review): division by zero pixels in img2 will warn and produce
# inf/nan values — confirm this is acceptable for the demo.
mult = img1 * img2
div = img1 / img2
titles = ['Liquid Drop', 'Mandrill', 'Multiplication', 'Division']
# +
images = [img1, img2, mult, div]
for i in range(4):
    plt.subplot(1, 4, i+1)
    plt.imshow(images[i])
    plt.title(titles[i])
    plt.axis('off')
plt.show()
# +
alpha = 0.5
beta = 0.5
gamma = 0
# img1 * alpha + img2 * beta + gamma
output = cv2.addWeighted(img1, alpha, img2, beta, gamma)
titles = ['Liquid Drop', 'Mandrill', 'Weighted Addition']
images = [img1, img2, output]
# -
for i in range(3):
plt.subplot(1, 3, i+1)
plt.imshow(images[i])
plt.title(titles[i])
plt.axis('off')
plt.show()
# +
import time
path = 'd:/Dataset/'
path = 'd:/Dataset/'
imgpath1 = path + '4.2.01.tiff'
imgpath2 = path + '4.2.03.tiff'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
for i in np.linspace(0, 1, 1000):
alpha = i
beta = 1 - alpha
output = cv2.addWeighted(img1, alpha, img2, beta, 0)
cv2.imshow('Transition', output)
time.sleep(0.0001)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
# +
path = 'd:/Dataset/'
path = 'd:/Dataset/'
imgpath1 = path + '4.2.01.tiff'
imgpath2 = path + '4.2.03.tiff'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
# -
def emptyFunction(*args):
    """No-op callback for cv2.createTrackbar.

    OpenCV invokes trackbar callbacks with the current slider position;
    the original zero-argument signature would raise TypeError on every
    slider move. ``*args`` absorbs any arguments, and calling with no
    arguments still works.
    """
    pass
output = cv2.addWeighted(img1, 0.5, img2, 0.5, 0)
windowName = 'Transition Demo'
# +
cv2.namedWindow(windowName)
cv2.createTrackbar('Alpha', windowName, 0, 1000, emptyFunction)
while True:
cv2.imshow(windowName, output)
if cv2.waitKey(1) == 27:
break
alpha = cv2.getTrackbarPos('Alpha', windowName) / 1000
beta = 1 - alpha
output = cv2.addWeighted(img1, alpha, img2, beta, 0)
print(alpha, beta)
cv2.destroyAllWindows()
# +
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
r, g, b = cv2.split(img1)
titles = ['Original', 'Red', 'Green', 'Blue']
images = [cv2.merge((r, g, b)), r, g, b]
# +
plt.subplot(2, 2, 1)
plt.imshow(images[0])
plt.title(titles[0])
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(images[1], cmap='Reds')
plt.title(titles[1])
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(images[2], cmap='Greens')
plt.title(titles[2])
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(images[3], cmap='Blues')
plt.title(titles[3])
plt.axis('off')
plt.show()
# +
path = 'd:/Dataset/'
path = 'd:/Dataset/'
imgpath1 = path + '4.2.01.tiff'
imgpath2 = path + '4.2.03.tiff'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# -
img3 = cv2.bitwise_not(img1)
img4 = cv2.bitwise_and(img1, img2)
img5 = cv2.bitwise_or(img1, img2)
img6 = cv2.bitwise_xor(img1, img2)
# +
titles = ['Image 1', 'Image 2', 'Image NOT', 'AND', 'OR', 'XOR']
images = [img1, img2, img3, img4, img5, img6]
for i in range(6):
plt.subplot(2, 3, i+1)
plt.imshow(images[i])
plt.title(titles[i])
plt.axis('off')
plt.show()
# -
x = np.uint8([240])
y = np.uint8([20])
print(x + y) # (x + y) % 256
print(cv2.add(x, y))
# +
add1 = img1 + img2          # NumPy addition: uint8 arithmetic wraps (mod 256)
add2 = cv2.add(img1, img2)  # OpenCV addition: saturates at 255
titles = ['Liquid Drop', 'Mandrill', 'NumPy addition', 'cv2.add()']
# BUG FIX: the original listed img3/img4 (the bitwise NOT/AND results
# from the previous cell) under these addition titles; show the actual
# addition results instead.
images = [img1, img2, add1, add2]
# -
for i in range(4):
    plt.subplot(1, 4, i+1)
    plt.imshow(images[i])
    plt.title(titles[i])
    plt.axis('off')
plt.show()
| Section06/03_Image_Processing.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# <img src="https://julialang.org/assets/infra/logo.svg" alt="Julia" width="200" style="max-width:100%;">
#
# [](https://mybinder.org/v2/gh/alan-turing-institute/MLJ.jl/master?filepath=binder%2FMLJ_demo.ipynb)
#
# ## Lightning encounter with Julia programming language
#
# ###### Julia related content prepared by [@ablaom](https://github.com/ablaom)
#
# Interacting with Julia at the REPL, or in a notebook, feels very much
#
# the same as python, MATLAB or R:
print("Hello world!")
2 + 2
typeof(42.0)
# ## Just-in-time compilation
#
# Here's a function used in generating the famous Mandelbrot set,
#
# which looks pretty much the same in python, MATLAB or R:
function mandel(z)
    # Escape-time iteration for the Mandelbrot set: repeatedly apply
    # z <- z^2 + c with c fixed to the starting point. Returns the number
    # of iterations taken before |z| exceeds 2, or maxiter if it never
    # escapes (point assumed inside the set).
    c = z
    maxiter = 80
    for n in 1:maxiter
        if abs(z) > 2
            return n-1
        end
        z = z^2 + c
    end
    return maxiter
end
# In particular, notice the absence of type annotations. The crucial difference is what happens when you call this function:
@time mandel(1.2) # time call on a Float64
# This is actually pretty lousy, slower than python. However, trying again:
@time mandel(3.4) # time on another Float64
# Thousands of times faster, second time around! What happened?
#
# When you call `mandel(1.2)` in python, say, then the defining code
# is interpreted each time. When you call `mandel(1.2)` in Julia for
# the first time, Julia inspects the type of the argument, namely `Float64`,
# and using this information *compiles* an efficient type-specific
# version of `mandel`, which it caches for use in any subsequent call
# *on the same type*. Indeed if we call `mandel` on a new type, a new
# compilation will be needed:
@time mandel(1.0 + 5.0im)
@time mandel(2.0 + 0.5im)
# Since plotting the Mandelbrot set means calling `mandel` millions of
# times on the same type, the advantage of just-in-time compilation is
# obvious.
# +
using PyPlot
plt.imshow([mandel(x + y * im) for y = -1:0.001:1, x = -2:0.001:1])
# -
# ## Multiple dispatch
#
# You will never see anything like `A.add(B)` in Julia because Julia
# is not a traditional object-oriented language. In Julia, function and
# structure are kept separate, with the help of abstract types and
# multiple dispatch, as we explain next
# In addition to regular concrete types, such as `Float64` and
# `String`, Julia has a built-in hierarchy of *abstract* types. These
# generally have subtypes but no instances:
typeof(42)
supertype(Int64)
supertype(Signed)
subtypes(Integer)
Bool <: Integer # is Bool a subtype of Integer?
Bool <: String
# In Julia, which is optionally typed, one uses type annotations to
# adapt the behaviour of functions to their types. If we define
divide(x, y) = x / y
# then `divide(x, y)` will make sense whenever `x / y` makes sense (for
# the built-in function `/`). For example, we can use it to divide two
# integers, or two matrices:
divide(1, 2)
divide([1 2; 3 4], [1 2; 3 7])
# To vary the behaviour for specific types we make type annotatations:
divide(x::Integer, y::Integer) = floor(x/y)
divide(x::String, y::String) = join([x, y], " / ")
divide(1, 2)
divide("Hello", "World!")
# In the case of `Float64` the original "fallback" method still
# applies:
divide(1.0, 2.0)
# ## User-defined types
#
# Users can define their own abstract types and composite types:
# +
# A tiny type hierarchy: Organism is abstract, Animal and Plant are its
# concrete subtypes.
abstract type Organism end

struct Animal <: Organism
    name::String
    is_hervibore::Bool
end

struct Plant <: Organism
    name::String
    is_flowering::Bool
end

# fall-back method: any Organism is described by its name
describe(o::Organism) = string(o.name)

# specialized method for Plant, dispatched by the argument's type
function describe(p::Plant)
    if p.is_flowering
        text = " is a flowering plant."
    else
        text = " is a non-flowering plant."
    end
    return p.name*text
end
# -
describe(Animal("Elephant", true))
describe(Plant("Fern", false))
# ## Type inference and multiple dispatch
#
# *Type inference* is the process of identifying the types of the arguments to dispatch the right method.
#
# Blogpost about [type dispatch](http://www.stochasticlifestyle.com/type-dispatch-design-post-object-oriented-programming-julia/) by [Chris Rackauckas](http://www.chrisrackauckas.com/).
# +
# Two methods of the same function, selected by the argument's type.
function function_x(x::String)
    println("this is a string: $x")
end

function function_x(x::Int)
    println("$(x^2) is the square of $x")
end
# -
# each call to the function_x() will dispatch the corresponding method depending on the parameter's type
function_x("a string")
function_x(2)
# ## Automatic differentiation
#
# Differentiation of almost arbitrary programs with respect to their input. ([source]( https://render.githubusercontent.com/view/ipynb?commit=89317894e2e5370a80e45d52db8a4055a4fdecd6&enc_url=68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f6d6174626573616e636f6e2f454d455f4a756c69615f776f726b73686f702f383933313738393465326535333730613830653435643532646238613430353561346664656364362f315f496e74726f64756374696f6e2e6970796e62&nwo=matbesancon%2FEME_Julia_workshop&path=1_Introduction.ipynb&repository_id=270611906&repository_type=Repository#Automatic-differentiation) by [@matbesancon](https://github.com/matbesancon))
# +
using ForwardDiff

# Babylonian (Heron's) method: start from s/2 and average x with s/x
# until x^2 is within 0.001 of s.
function sqrt_babylonian(s)
    x = s / 2
    while abs(x^2 - s) > 0.001
        x = (x + s/x) / 2
    end
    x
end
# -
sqrt_babylonian(2) - sqrt(2)
@show ForwardDiff.derivative(sqrt_babylonian, 2);
@show ForwardDiff.derivative(sqrt, 2);
# ## Unitful computations
# Physicists' dreams finally made true. ([source](https://render.githubusercontent.com/view/ipynb?commit=89317894e2e5370a80e45d52db8a4055a4fdecd6&enc_url=68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f6d6174626573616e636f6e2f454d455f4a756c69615f776f726b73686f702f383933313738393465326535333730613830653435643532646238613430353561346664656364362f315f496e74726f64756374696f6e2e6970796e62&nwo=matbesancon%2FEME_Julia_workshop&path=1_Introduction.ipynb&repository_id=270611906&repository_type=Repository#Unitful-computations) by [@matbesancon](https://github.com/matbesancon))
using Unitful
using Unitful: J, kg, m, s
3J + 1kg * (1m / 1s)^2
# <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/master/material/MLJLogo2.svg?sanitize=true" alt="MLJ" width="200" style="max-width:100%;">
#
# # MLJ
#
# MLJ (Machine Learning in Julia) is a toolbox written in Julia providing a common interface and meta-algorithms for selecting, tuning, evaluating, composing and comparing machine learning models written in Julia and other languages. MLJ is released under the MIT licensed and sponsored by the [Alan Turing Institute](https://www.turing.ac.uk/).
# ### The MLJ Universe
#
# The functionality of MLJ is distributed over a number of repositories
# illustrated in the dependency chart below.
#
# [MLJ](https://github.com/alan-turing-institute/MLJ) * [MLJBase](https://github.com/alan-turing-institute/MLJBase.jl) * [MLJModelInterface](https://github.com/alan-turing-institute/MLJModelInterface.jl) * [MLJModels](https://github.com/alan-turing-institute/MLJModels.jl) * [MLJTuning](https://github.com/alan-turing-institute/MLJTuning.jl) * [MLJLinearModels](https://github.com/alan-turing-institute/MLJLinearModels.jl) * [MLJFlux](https://github.com/alan-turing-institute/MLJFlux.jl) * [MLJTutorials](https://github.com/alan-turing-institute/MLJTutorials) * [MLJScientificTypes](https://github.com/alan-turing-institute/MLJScientificTypes.jl) * [ScientificTypes](https://github.com/alan-turing-institute/ScientificTypes.jl)
#
#
# <div align="center">
# <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/master/material/MLJ_stack.svg?sanitize=true" alt="Dependency Chart">
# </div>
#
# *Dependency chart for MLJ repositories. Repositories with dashed
# connections do not currently exist but are planned/proposed.*
# MLJ provides access to to a wide variety of machine learning models. For the most up-to-date list of available models `models()`.
using MLJ
models()
# ## Fit, predict, transform
#
# The following example is using the `fit()`, `predict()`, and `transform()` functions of MLJ.
import Statistics
using PrettyPrinting
using StableRNGs
# Load the iris data as (features X, target y); the trailing `;` suppresses output.
X, y = @load_iris;
# let's also load the DecisionTreeClassifier:
@load DecisionTreeClassifier
# A model instance is only a container for hyperparameters (here: the defaults).
tree_model = DecisionTreeClassifier()
# ## MLJ Machine
#
# In MLJ, a *model* is an object that only serves as a container for the hyperparameters of the model. A *machine* is an object wrapping both a model and data and can contain information on the *trained* model; it does *not* fit the model by itself. However, it does check that the model is compatible with the scientific type of the data and will warn you otherwise.
# Bind model + data into a machine; checks scitype compatibility but does not train yet.
tree = machine(tree_model, X, y)
# A machine is used for both supervised and unsupervised models. In this tutorial we give an example for the supervised model first and then go on with the unsupervised case.
#
# ## Training and testing a supervised model
#
# Now that you've declared the model you'd like to consider and the data, we are left with the standard training and testing step for a supervised learning algorithm.
#
# ## Splitting the data
#
# To split the data into a training and testing set, you can use the function `partition` to obtain indices for data points that should be considered either as training or testing data:
rng = StableRNG(566)  # stable RNG so the shuffle (and all results below) are reproducible
# 70% of the shuffled indices go to `train`, the remaining 30% to `test`.
train, test = partition(eachindex(y), 0.7, shuffle=true, rng=rng)
test[1:3]
# ## Fitting and testing the machine
#
# To fit the machine, you can use the function `fit!` specifying the rows to be used for the training:
# Train on the training rows only; the machine now stores the fitted tree.
fit!(tree, rows=train)
# Note that this **modifies** the machine which now contains the trained parameters of the decision tree. You can inspect the result of the fitting with the `fitted_params` method:
fitted_params(tree) |> pprint
# This `fitresult` will vary from model to model though classifiers will usually give out a tuple with the first element corresponding to the fitting and the second one keeping track of how classes are named (so that predictions can be appropriately named).
#
# You can now use the machine to make predictions with the `predict` function specifying rows to be used for the prediction:
ŷ = predict(tree, rows=test)
@show ŷ[1]
# Note that the output is probabilistic, effectively a vector with a score for each class. You could get the mode by using the `mode` function on `ŷ` or using `predict_mode`:
ȳ = predict_mode(tree, rows=test)
@show ȳ[1]
@show mode(ŷ[1])
# To measure the discrepancy between ŷ and y you could use the average cross entropy:
# (per-observation losses from `cross_entropy` are averaged with `mean`)
mce = cross_entropy(ŷ, y[test]) |> mean
round(mce, digits=4)
# # [Check out MLJ example with TreeParzen.jl](TreeParzen_example.ipynb)
# # A more advanced example
using MLJ
using StableRNGs
import DataFrames
# Bring the ridge regressor implementation from MultivariateStats into scope.
@load RidgeRegressor pkg=MultivariateStats
# In this example we will show how to generate a model from a network; there are two approaches:
#
# * using the `@from_network` macro
# * writing the model in full
#
# the first approach should usually be the one considered as it's simpler.
#
# Generating a model from a network allows subsequent composition of that network with other tasks and tuning of that network.
#
# ### Using the @from_network macro
#
# Let's define a simple network
#
# *Input layer*
# +
rng = StableRNG(6616) # for reproducibility
x1 = rand(rng, 300)
x2 = rand(rng, 300)
x3 = rand(rng, 300)
# Synthetic regression target: exponential of a linear signal plus noise.
y = exp.(x1 - x2 -2x3 + 0.1*rand(rng, 300))
X = DataFrames.DataFrame(x1=x1, x2=x2, x3=x3)
# BUG FIX: `partition` returns the splits in fraction order — the first 80%
# first — so the larger split is the *training* set. The original bound the
# 80% split to `test` (`test, train = ...`), swapping the two.
train, test = partition(eachindex(y), 0.8);
# Source nodes are the entry points of the learning network.
Xs = source(X)
ys = source(y, kind=:target)
# -
# *First layer*
# +
# Standardize the features; the machine wraps the transformer and the source node.
std_model = Standardizer()
stand = machine(std_model, Xs)
W = MLJ.transform(stand, Xs)
# Box-Cox transform the target to make its distribution more symmetric.
box_model = UnivariateBoxCoxTransformer()
box = machine(box_model, ys)
z = MLJ.transform(box, ys)
# -
# *Second layer*
# Ridge regression on the standardized features against the transformed target.
ridge_model = RidgeRegressor(lambda=0.1)
ridge = machine(ridge_model, W, z)
ẑ = predict(ridge, W)
# *Output*
# Map predictions back to the original target scale (inverse Box-Cox).
ŷ = inverse_transform(box, ẑ)
# No fitting has been done thus far, we have just defined a sequence of operations.
#
# To form a model out of that network is easy using the `@from_network` macro:
# Export the network (terminating at ŷ) as a standalone composite model type.
@from_network CompositeModel(std=std_model, box=box_model,
                             ridge=ridge_model) <= ŷ;
# The macro defines a constructor CompositeModel and attributes a name to the different models; the ordering / connection between the nodes is inferred from `ŷ` via the `<= ŷ`.
#
# **Note**: had the model been probabilistic (e.g. `RidgeClassifier`) you would have needed to add `is_probabilistic=true` at the end.
cm = machine(CompositeModel(), X, y)
# Train and evaluate the composite with an 80/20 holdout, scoring by RMS error.
res = evaluate!(cm, resampling=Holdout(fraction_train=0.8, rng=51),
                measure=rms)
round(res.measurement[1], sigdigits=3)
# ## Check out more [Data Science tutorials in Julia](https://alan-turing-institute.github.io/DataScienceTutorials.jl/).
| binder/MLJ_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #
# %load_ext autoreload
# %autoreload 2
from ramprate.load_dataset import load_epacems, load_epa_crosswalk
from ramprate.build_features import uptime_events, calc_distance_from_downtime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] heading_collapsed=true
# ## CEMS Processing
# + hidden=true
# all states, 1 year
cols = ['plant_id_eia', 'unitid', 'operating_datetime_utc',
        'operating_time_hours', 'gross_load_mw', 'steam_load_1000_lbs',
        'heat_content_mmbtu', 'unit_id_epa',
        ]
# Hourly CEMS records for every state for 2019, restricted to `cols`.
cems = load_epacems(states=None, years=[2019], columns=cols, engine='pandas')
# + hidden=true
# %%time
idx = pd.IndexSlice
# Sort + index by (unit, timestamp) so the per-unit groupby/diff below sees
# time-ordered rows; columns are kept (drop=False) for later key extraction.
cems.sort_values(by=['unit_id_epa', 'operating_datetime_utc'], inplace=True)
cems.set_index(['unit_id_epa', 'operating_datetime_utc'], drop=False, inplace=True)#, verify_integrity=True)
# + [markdown] hidden=true
# ### Calculate ramps and distance from downtime
# + hidden=true
# Mutates `cems` in place — adds the hours_from_startup / hours_to_shutdown
# columns used below (see ramprate.build_features).
# %time calc_distance_from_downtime(cems) # in place
print('')
# + hidden=true
units = cems.groupby(level="unit_id_epa")
# ramp rate: MW / hour (first difference of load within each unit's series)
cems['ramp_rate'] = units["gross_load_mw"].transform(lambda x: x.diff())
# + hidden=true
cems.head()
# + hidden=true
# Hours to the nearest boundary (startup or shutdown), whichever is closer.
cems['hours_distance'] = (cems[['hours_from_startup', 'hours_to_shutdown']].min(axis=1))
# + hidden=true
cems['nearest_to_startup'] = cems['hours_from_startup'] < cems['hours_to_shutdown']
# randomly allocate midpoints
rng = np.random.default_rng(seed=42)
# Rows exactly halfway between startup and shutdown, randomly split ~50/50.
rand_midpoints = (cems['hours_from_startup'] == cems['hours_to_shutdown']) & rng.choice(np.array([True, False]), size=len(cems))
# NOTE(review): this writes a new `is_startup` column that is never read again
# in this notebook, while `nearest_to_startup` (set two statements up) is what
# the aggregation groups on — presumably this was meant to flip
# `nearest_to_startup` for the random midpoints; confirm intent.
cems.loc[rand_midpoints, 'is_startup'] = True
del rand_midpoints
# -
# ## Aggregate
# ### Max ramp rates
cems['hours_distance_clipped'] = cems['hours_distance'].clip(upper=10) # threshold
# Extreme (max/min) ramp rate per unit within each distance-from-boundary bucket,
# split by whether the row is nearer a startup or a shutdown.
max_ramps = (cems.drop(columns=['unit_id_epa']) # resolve ambiguity between index and col with same name
             .groupby(['unit_id_epa', 'hours_distance_clipped', 'nearest_to_startup'])['ramp_rate']
             .agg(['max', 'min'])
             .add_suffix('_ramp_rate')
)
# Largest ramp magnitude regardless of sign (up- or down-ramp).
max_ramps['abs_max_ramps'] = max_ramps[['max_ramp_rate', 'min_ramp_rate']].abs().max(axis=1)
# ### Other aggregates
# #### Per-unit metrics
per_unit_aggs = cems.groupby(level='unit_id_epa')['gross_load_mw'].agg(['max', 'count', 'sum']).add_suffix('_load')
# Share of observed hours with non-zero load.
per_unit_aggs['utilization'] = cems['gross_load_mw'].gt(0).groupby(level='unit_id_epa').mean()
# Energy produced relative to (max observed load x observed hours).
per_unit_aggs['capacity_factor'] = per_unit_aggs['sum_load'].div(per_unit_aggs['max_load'] * per_unit_aggs['count_load'])
# #### uptime event analysis
# Summary stats about the distribution of uptime events can help distinguish different types of plant operation
# %time events = uptime_events(cems)
events.sample(3)
# Count / mean / std of uptime-event durations per unit, with readable names.
uptime_aggs = (events['duration_hours'].groupby(level='unit_id_epa').agg(['count', 'mean', 'std'])
               .rename(columns={
                   'count': 'n_uptime_events',
                   'mean': 'mean_uptime_duration',
                   'std': 'std_uptime_duration'}
               )
)
# ## Join other info
# ### Join other aggregates
# Broadcast the per-unit aggregates onto every row of the ramp-rate table.
max_ramps = (max_ramps
             .join(per_unit_aggs, on='unit_id_epa')
             .join(uptime_aggs, on='unit_id_epa')
)
del per_unit_aggs, uptime_aggs
# ### Join Crosswalk
# #### Prep: bring in surrogate key
# Unique (plant, unit) -> unit_id_epa surrogate-key lookup taken from CEMS.
key_map = cems[['plant_id_eia', 'unitid', 'unit_id_epa']].reset_index(drop=True).drop_duplicates()
crosswalk = load_epa_crosswalk()
# Right-join keeps every CEMS unit, even those missing from the crosswalk.
crosswalk = crosswalk.merge(key_map,
                            left_on=['CAMD_PLANT_ID', 'CAMD_UNIT_ID'],
                            right_on=['plant_id_eia', 'unitid'],
                            how='right',
                            validate='many_to_one' # checks if right (key_map) is unique
)
# 18 failed matches
# Rows that are mostly NaN came from CEMS units with no crosswalk entry.
crosswalk.loc[crosswalk.isna().mean(axis=1) > 0.75, ['plant_id_eia', 'unitid', 'unit_id_epa', 'CAMD_PLANT_ID', 'CAMD_UNIT_ID']]
subset = ['CAMD_FACILITY_NAME', 'CAMD_PLANT_ID',
          'CAMD_UNIT_ID', 'CAMD_GENERATOR_ID', 'CAMD_NAMEPLATE_CAPACITY', 'EIA_PLANT_NAME', 'EIA_PLANT_ID',
          'EIA_GENERATOR_ID', 'EIA_NAMEPLATE_CAPACITY', 'EIA_BOILER_ID',
          'EIA_UNIT_TYPE', 'EIA_FUEL_TYPE',]
# Spot-check one plant (EIA plant 116) with a non-trivial unit/generator mapping.
crosswalk.loc[crosswalk['EIA_PLANT_ID'] == 116, subset]
# #### Aggregate combustor/generator types
# About a third of combustor/generator relationships are not one-to-one, meaning multiple unit and fuel types can be involved. **For a fast first analysis, I resolved ambiguity in categories by labeling each generator with the most frequent category.** The most frequent type may be different from the highest capacity or most energy produced, etc.
#
# It shouldn't be too hard to refine these aggregations with a more appropriate metric. While handling that, I can also aggregate nameplate capacity.
subset = ['CAMD_FUEL_TYPE', 'EIA_FUEL_TYPE', 'EIA_UNIT_TYPE']
# Most-frequent category per generator; NaNs are filled first so mode() has input.
generator_aggs = (crosswalk
                  .fillna(dict.fromkeys(subset, 'NaN')) # otherwise pd.Series.Mode returns array([])
                  .groupby(['CAMD_PLANT_ID', 'CAMD_GENERATOR_ID'])[subset]
                  .agg(pd.Series.mode)
                  .add_prefix('agg_')
                  .reset_index()
)
generator_aggs.shape
crosswalk = crosswalk.merge(generator_aggs, on=['CAMD_PLANT_ID', 'CAMD_GENERATOR_ID'], how='left')
# #### Mark one-to-one units
# calculate cardinality of combustor relationships
# Count generators per combustor; size == 1 means the combustor has one edge.
crosswalk = crosswalk.merge(crosswalk.groupby(['CAMD_PLANT_ID', 'CAMD_UNIT_ID'],
                                              as_index=False)['CAMD_GENERATOR_ID']
                            .size()
                            .rename(columns={'size': 'has_single_edge_from_combust'}),
                            on=['CAMD_PLANT_ID', 'CAMD_UNIT_ID'],
                            how='left'
)
# Convert the raw count into a boolean (exactly one edge).
crosswalk['has_single_edge_from_combust'] = crosswalk['has_single_edge_from_combust'].eq(1)
# calculate cardinality of generator relationships
crosswalk = crosswalk.merge(crosswalk.groupby(['CAMD_PLANT_ID', 'CAMD_GENERATOR_ID'],
                                              as_index=False)['CAMD_UNIT_ID']
                            .size()
                            .rename(columns={'size': 'has_single_edge_from_gen'}),
                            on=['CAMD_PLANT_ID', 'CAMD_GENERATOR_ID'],
                            how='left'
)
crosswalk['has_single_edge_from_gen'] = crosswalk['has_single_edge_from_gen'].eq(1)
# A unit is one-to-one only if every edge touching it is single in both directions.
crosswalk = crosswalk.merge(crosswalk.groupby(['CAMD_PLANT_ID', 'CAMD_UNIT_ID'])[['has_single_edge_from_combust',
                                                                                  'has_single_edge_from_gen']]
                            .all()
                            .all(axis='columns')
                            .rename('is_one_to_one')
                            .reset_index(),
                            on=['CAMD_PLANT_ID', 'CAMD_UNIT_ID'],
                            how='left'
)
crosswalk.shape
# How many (sum) and what share (mean) of units are strictly one-to-one.
crosswalk.groupby(['CAMD_PLANT_ID', 'CAMD_UNIT_ID'])['is_one_to_one'].all().agg(['sum', 'mean'])
# #### Join unit info
subset = ['EIA_UNIT_TYPE', 'EIA_FUEL_TYPE', 'CAMD_FACILITY_NAME',
          'CAMD_PLANT_ID', 'CAMD_GENERATOR_ID', 'CAMD_NAMEPLATE_CAPACITY',
          'EIA_GENERATOR_ID', 'EIA_NAMEPLATE_CAPACITY', 'EIA_BOILER_ID',
          'unit_id_epa', 'is_one_to_one', 'MATCH_TYPE_GEN',
          'agg_CAMD_FUEL_TYPE', 'agg_EIA_FUEL_TYPE', 'agg_EIA_UNIT_TYPE'
          ]
# Attach crosswalk attributes to each ramp-rate row via the EPA unit key.
max_ramps = max_ramps.join(crosswalk[subset].set_index('unit_id_epa'), on='unit_id_epa')
# +
#subset = ['CAMD_FUEL_TYPE', 'unit_id_epa']
#max_ramps = max_ramps.join(crosswalk[subset].drop_duplicates().set_index('unit_id_epa'), on='unit_id_epa')
# -
# #### Drop non-exporting industrial CHP
# per EPA docs, `Manual CAMD Excluded` indicates industrial co-generation plants that don't export to the grid.
max_ramps = max_ramps[max_ramps['MATCH_TYPE_GEN'] != 'Manual CAMD Excluded']
max_ramps.shape
# ## Analyze
# max ramp rates normalized by max observed load: % capacity / hour
max_ramps['max_obs_ramp_factor'] = max_ramps['abs_max_ramps'].div(max_ramps['max_load'])
max_ramps.columns
# ### Look at one-to-one units
# Units that never matched the crosswalk carry NaN in `is_one_to_one`.
max_ramps['is_one_to_one'].isna().sum()
max_ramps[max_ramps['is_one_to_one'].isna()]['count_load'].value_counts()
# Keep units with a positive ramp factor and a confirmed one-to-one mapping
# (NaN -> False, so unmatched units are excluded).
non_zero_one_to_one = max_ramps[max_ramps['max_obs_ramp_factor'].gt(0)
                                & max_ramps['is_one_to_one'].fillna(False).astype(bool)].reset_index()
non_zero_one_to_one.shape
# Distribution of MAXIMUM ramp factor faceted by distance from startup/shutdown
# Note that the last category is 10+ hours, not =10
a = sns.displot(data=non_zero_one_to_one,
                col='hours_distance_clipped',  # one facet per hour bucket
                y='max_obs_ramp_factor',
                hue='nearest_to_startup',      # color: startup vs shutdown side
                kind="hist",
                aspect=0.25,
                element="step")
a.set_titles('hours = {col_name}')
# #### By Fuel
# Frequency of per-unit CAMD fuel types among the one-to-one, non-zero units.
non_zero_one_to_one.groupby('unit_id_epa')['agg_CAMD_FUEL_TYPE'].first().value_counts()
# Collapse the detailed CAMD fuel labels into coarse buckets (gas / oil / Coal);
# labels not listed here (e.g. 'Coal', 'Wood') pass through `.replace` unchanged.
_fuel_buckets = {
    'gas': ('Pipeline Natural Gas', 'Natural Gas', 'Process Gas', 'Other Gas'),
    'oil': ('Diesel Oil', 'Other Oil', 'Residual Oil', 'Petroleum Coke'),
    'Coal': ('Coal Refuse',),
}
# Invert buckets into the flat {raw label: simple label} form `.replace` expects.
fuel_mapping = {raw: simple for simple, raws in _fuel_buckets.items() for raw in raws}
non_zero_one_to_one['simple_CAMD_FUEL_TYPE'] = non_zero_one_to_one['agg_CAMD_FUEL_TYPE'].replace(fuel_mapping)
# Note that the last category is 10+ hours, not =10
# Same faceted histogram as above, drawn once per simplified fuel type.
for fuel in ['Coal', 'oil', 'gas', 'Wood']:
    sns.displot(data=non_zero_one_to_one[non_zero_one_to_one['simple_CAMD_FUEL_TYPE'] == fuel],
                col='hours_distance_clipped',
                y='max_obs_ramp_factor',
                hue='nearest_to_startup',
                kind="hist",
                aspect=0.25,
                element="step").set_titles('hours = {col_name}')
    plt.suptitle(fuel, y=1.05)  # label the whole figure with the fuel name
    plt.show()
# #### By Generator Type
# Per-unit EIA prime-mover codes among the one-to-one, non-zero units.
types = non_zero_one_to_one.groupby('unit_id_epa')['agg_EIA_UNIT_TYPE'].first().value_counts()
types
# Expand terse EIA prime-mover codes into readable generator-type names.
# NB: `.map` (unlike `.replace`) turns any code absent from this table into NaN.
_code_names = [
    ('GT', 'Gas Turbine'),
    ('ST', 'Steam Turbine'),
    ('CS', 'Combined Cycle'),
    ('CT', 'Combined Cycle'),
    ('CA', 'Combined Cycle'),
    ('NaN', 'NaN'),
    ('IC', 'Internal Combustion'),
]
type_mapping = dict(_code_names)
non_zero_one_to_one['simple_EIA_UNIT_TYPE'] = non_zero_one_to_one['agg_EIA_UNIT_TYPE'].map(type_mapping)
# Note that the last category is 10+ hours, not =10
# Same faceted histogram, drawn once per simplified generator type
# ('NaN' and 'Internal Combustion' are intentionally skipped here).
for gen_type in ['Gas Turbine', 'Steam Turbine', 'Combined Cycle']:
    sns.displot(data=non_zero_one_to_one[non_zero_one_to_one['simple_EIA_UNIT_TYPE'] == gen_type],
                col='hours_distance_clipped',
                y='max_obs_ramp_factor',
                hue='nearest_to_startup',
                kind="hist",
                aspect=0.25,
                element="step").set_titles('hours = {col_name}')
    plt.suptitle(gen_type, y=1.05)  # label the whole figure with the type name
    plt.show()
# ### Aggregate to Generators
# Same story as above (see "Aggregate combustor/generator types")
| notebooks/5.0-tb-one_to_one_ramp_rates_by_plant_type.ipynb |