code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Optimization # + tags=["hide-input"] import numpy as np import matplotlib.pyplot as plt from scipy import stats # These are some parameters to make figures nice (and big) # #%matplotlib inline # #%config InlineBackend.figure_format = 'retina' params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 5), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} plt.rcParams.update(params) # - # # Theory # # In this notebook we will briefly consider the more general problem of fitting a model $\tilde y(\theta)$ to some obervations $y$, where $\theta$ are the model parameters # # If the model is nonlinear, we will not be able to write simple, analytical estimator, so we will look for a numerical solution. This is very common for machine learning problems, where we cannot compute an optimal solution algebraically. # # We will generally fit the parameters by minimizing the misfit, i.e. by minimizing a cost function $J(\theta,y)$: # # $$\hat\theta =\text{argmin } J(\theta,y)$$ # # A typical choice for the cost function is the least-squares cost function. If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. For a set of observations $y(t_j)$, the cost function becomes # $$ J\left[\theta,y(t_j)\right]=\sum_j \left[\tilde y(\theta,t_j)-y(t_j)\right]^2$$ # # For now, we will use a simple python optimiziation method to minimize the cost function. There are several such methods, but they all behave the same. 
They require a cost function J, that is a function of the parameters, and needs to be minimized, and an initial condition $\theta_i$. # # ``` # theta_hat=optimize.minimize(J,theta_init,args) # ``` # # Example Model # Consider the model $\tilde y(t)=y_0\cdot \exp (-t/\tau)$ # with parameters $\theta =\{y_0,\tau\}$, which can be also written a # # $$\tilde y(\theta,t)=\theta_1 \cdot \exp (-t/\theta_2)$$ # # Let's first visualize the model, before fitting it to some synthetic observations def model(theta,t): y_model=theta[0]*np.exp(-t/theta[1]) return y_model # Let's choose some parameters and generate some pseudo-observations y0_true =3; tau_true =3; theta_true=[y0_true,tau_true]; sigma_e =0.4; # errors # + tags=["hide-input"] # plot model and pseudo observations t=np.linspace(0,10,100) e=stats.norm.rvs(0,sigma_e,100) y_true=model(theta_true,t) y_obs =y_true+e fig,ax=plt.subplots(1,1,figsize=[4,4]) ax.plot(t,y_true,'-',label='truth') ax.plot(t,y_obs,'o',label='observations') ax.legend(); # - # **Let's plot the model for some first guesses** theta=[[3.5,6], [5,3], [3.3,2.4]] # + tags=["hide-input"] #Example of models fig,ax=plt.subplots(1,3,figsize=[12,4]) for j in range(3): y =model(theta[j],t) ax[j].plot(t,y_true,'-',label='truth') ax[j].plot(t,y_obs,'o',label='observations') ax[j].plot(t,y,'-',label='model') ax[j].legend() # - # # Cost Function # # We will generally fit the parameters by minimizing themisfit, i.e. by minimizing a cost function $J(\theta,y)$: # # $$\hat\theta =\text{argmin } J(\theta,y)$$ # # A typical choice for the cost function is the least-squares cost function. If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. 
For a set of observations $y(t_j)$, the cost function becomes # # $$ J(\theta,y(t_j))=\sum_j \left[\tilde y(\theta,t_j)-y(t_j)\right]^2$$ # def Jcost(theta,y_obs,t): Jcost=np.sum( (y_obs-model(theta,t))**2) return Jcost # + tags=["hide-input"] fig,ax=plt.subplots(1,3,figsize=[12,3]) for j in range(3): y =model(theta[j],t) J=Jcost(theta[j],y_obs,t) ax[j].plot(t,y_true,'-',label='truth') ax[j].plot(t,y_obs,'o',label='observations') ax[j].plot(t,y,'-',label='model') title_str='J='+np.array2string(J,precision=2) ax[j].set_title(title_str) # - # ## Visualize the Cost Function # + N1=21; N2=20; y0_vec=np.linspace(1.5,4,N1); tau_vec=np.linspace(1,4,N2); J=np.zeros(shape=[N1,N2]); for j1 in range(N1): for j2 in range(N2): theta=[y0_vec[j1],tau_vec[j2]]; J[j1,j2]=Jcost(theta,y_obs,t); # + tags=["hide-input"] from matplotlib import cm fig, ax = plt.subplots(subplot_kw={"projection": "3d"},figsize=[10,10]) X,Y=np.meshgrid(tau_vec,y0_vec) surf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'), linewidth=0, antialiased=False) ax.invert_yaxis() ax.invert_xaxis() ax.set_ylabel('theta_1=$y_0$'); ax.set_xlabel('theta_2=tau'); ax.set_zlabel('J(theta)'); fig.colorbar(surf, shrink=0.5, aspect=10,label='J(theta)'); # - # # Optimize using scipy package # + from scipy import optimize theta_i=[2,1.2] theta_hat=optimize.minimize(Jcost,theta_i,args=(y_obs,t)).x; print(theta_hat) print(theta_true) # + tags=["hide-input"] y_true=model(theta_true,x) y_obs =y_true+e fig,ax=plt.subplots(1,2,figsize=[20,10]) ax[0].plot(x,y_true,'-',label='truth') ax[0].plot(x,y_obs,'o',label='observations') ax[0].plot(x,model(theta_i,x),'k--',label='initial gues') ax[0].plot(x,model(theta_hat,x),'r--',label='best_fit') ax[0].legend() ax=plt.subplot(1,2,2,projection='3d') X,Y=np.meshgrid(tau_vec,y0_vec) surf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'), linewidth=0, antialiased=False,alpha=0.6) ax.invert_yaxis() ax.invert_xaxis() ax.set_ylabel('theta_1=$y_0$'); ax.set_xlabel('theta_2=tau'); 
ax.set_zlabel('J(theta)'); #ax.grid(False) plt.colorbar(surf,ax=ax, shrink=0.5, aspect=10,label='J(theta)'); ax.plot3D(theta_i[1],theta_i[0],Jcost(theta_i,y_obs,t),'ko',markersize=10,label='initial guess'); ax.plot3D(theta_hat[1],theta_hat[0],Jcost(theta_hat,y_obs,t),'ro',markersize=10,label='best fit'); ax.legend(); # - # # Summary # Most of the code above is for plotting purposes. The actual optimization is done in ver few lines of code, summarized below # + # define your model def model(theta,t): y_model=theta[0]*np.exp(-t/theta[1]) return y_model # define your cost function, as a function of the parameter vector def Jcost(theta,y_obs,t): Jcost=np.sum( (y_obs-model(theta,t))**2) return Jcost # choose an initial guess theta_init=[2,1.2] #optimize theta_hat=optimize.minimize(Jcost,theta_init,args=(y_obs,t)).x;
_build/jupyter_execute/content/Module02/M02_N02_Optimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # -*- coding: utf-8 -*- import pandas as pd import numpy as np import matplotlib.pyplot as plt # - file_path = '../data/drinks.csv' drinks = pd.read_csv(file_path) #read_csv() ํ•จ์ˆ˜๋กœ ๋ฐ์ดํ„ฐ๋ฅผ ๋ฐ์ดํ„ฐ ํ”„๋ ˆ์ž„ ํ˜•ํƒœ๋กœ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค. print(drinks.info()) drinks.head(10) # ํ”ผ์ฒ˜์˜ ์ˆ˜์น˜์  ์ •๋ณด ํ™•์ธ drinks.describe() # beer_servings, wind_servings ๋‘ ํ”ผ์ฒ˜ ๊ฐ„์˜ ์ƒ๊ด€ ๊ณ„์ˆ˜๋ฅผ ๊ณ„์‚ฐํ•ฉ๋‹ˆ๋‹ค. # corr() ํ•จ์ˆ˜๋Š” ํ”ผ์ฒ˜๊ฐ„์˜ ์ƒ๊ด€ ๊ณ„์ˆ˜๋ฅผ matrix์˜ ํ˜•ํƒœ๋กœ ์ถœ๋ ฅํ•ฉ๋‹ˆ๋‹ค. # pearson์€ ์ƒ๊ด€ ๊ณ„์ˆ˜๋ฅผ ๊ตฌํ•˜๋Š” ๊ณ„์‚ฐ ๋ฐฉ๋ฒ• ์ค‘ ํ•˜๋‚˜๋ฅผ ์˜๋ฏธํ•˜๋ฉฐ, ๊ฐ€์žฅ ๋„๋ฆฌ ์“ฐ์ด๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. corr = drinks[['beer_servings', 'wine_servings']].corr(method = 'pearson') print(corr) # ํ”ผ์ฒ˜ ๊ฐ„์˜ ์ƒ๊ด€ ๊ณ„์ˆ˜ ํ–‰๋ ฌ์„ ๊ตฌํ•ฉ๋‹ˆ๋‹ค. cols = ['beer_servings', 'spirit_servings', 'wine_servings', 'total_litres_of_pure_alcohol'] corr = drinks[cols].corr(method = 'pearson') print(corr) # !pip install seaborn # + # seaborn ์ด๋ผ๋Š” ์‹œ๊ฐํ™” ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ํ™œ์šฉํ•˜์—ฌ 'heatmap', 'pairplot' ์ด๋ผ๋Š” ๊ธฐ๋ฒ•์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค. import seaborn as sns # corr ํ–‰๋ ฌ ํžˆํŠธ๋งต์„ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. cols_view = ['beer', 'spirit', 'wine', 'alcohol'] # ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ์„ ์œ„ํ•œ cols ์ด๋ฆ„์„ ์ถ•์•ฝํ•ฉ๋‹ˆ๋‹ค. sns.set(font_scale=1.5) hm = sns.heatmap(corr.values, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 15}, yticklabels=cols_view, xticklabels=cols_view) plt.tight_layout() plt.show() # - # ์‹œ๊ฐํ™” ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์ด์šฉํ•œ ํ”ผ์ฒ˜ ๊ฐ„์˜ ์‚ฐ์ ๋„ ๊ทธ๋ž˜ํ”„๋ฅผ ์ถœ๋ ฅํ•ฉ๋‹ˆ๋‹ค. 
sns.set(style = 'whitegrid', context = 'notebook') sns.pairplot(drinks[['beer_servings', 'spirit_servings', 'wine_servings', 'total_litres_of_pure_alcohol']], height = 2.5) plt.show() # ๊ฒฐ์ธก๊ฐ’์€ ์ž…๋ ฅ์ด ๋ˆ„๋ฝ๋œ ๊ฐ’์„ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค. ๋ณดํ†ต NA๋กœ ์ถœ๋ ฅ๋ฉ๋‹ˆ๋‹ค. # ๊ฒฐ์ธก ๋ฐ์ดํ„ฐ๋ฅผ ์ฒ˜๋ผํ•ฉ๋‹ˆ๋‹ค: ๊ธฐํƒ€ ๋Œ€๋ฅ™์œผ๋กœ ํ†ตํ•ฉ -> 'OT' # ๊ฒฐ์ธก๊ฐ’์„ ๋Œ€์ฒดํ•ด์ฃผ๋Š” ํ•จ์ˆ˜๋Š” fillna()ํ•จ์ˆ˜์ด๋‹ค. fillna(๊ฒฐ์ธก๊ฐ’์„ ๋Œ€์ฒดํ•  ๊ฐ’)์˜ ํ˜•ํƒœ drinks['continent'] = drinks['continent'].fillna('OT') drinks.head(10) # + # ํŒŒ์ด์ฐจํŠธ๋กœ ์‹œ๊ฐํ™”ํ•˜๊ธฐ (์ „์ฒด ๋Œ€๋ฅ™ ใ…ˆใ…‡์—์„œ OT๊ฐ€ ์ฐจ์ง€ํ•˜๋Š” ๋น„์œจ์ด ์–ผ๋งˆ๋‚˜ ๋˜๋Š”์ง€๋ฅผ ํ™•์ธ) labels = drinks['continent'].value_counts().index.tolist() fracs1 = drinks['continent'].value_counts().values.tolist() explode = (0, 0, 0, 0.25, 0, 0) # autopct : ์ž๋™์œผ๋กœ ํผ์„ผํŠธ ์ž…๋ ฅ '&,1f%%' : ์†Œ์ˆ˜์  ํ•œ ์ž๋ฆฌ๊นŒ์ง€ ํฌ๋ฉง plt.pie(fracs1, explode = explode, labels = labels, autopct='%.0f%%', shadow=True) plt.title('null data to \'OT\'') plt.show() # - # ๋Œ€๋ฅ™๋ณ„ spirit_servings์˜ ํ‰๊ท , ์ตœ์†Œ, ์ตœ๋Œ€, ํ•ฉ๊ณ„๋ฅผ ๊ณ„์‚ฐํ•ฉ๋‹ˆ๋‹ค. # agg๋Š” apply()์— ๋“ค์–ด๊ฐ€๋Š” ํ•จ์ˆ˜ ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ๋ณ‘๋ ฌ๋กœ ์„ค์ •ํ•˜์—ฌ ๊ทธ๋ฃน์— ๋Œ€ํ•œ ์—ฌ๋Ÿฌ ๊ฐ€์ง€ ์—ฐ์‚ฐ ๊ฒฐ๊ณผ๋ฅผ ๋™์‹œ์— ์–ป์„ ์ˆ˜ ์žˆ๋Š” ํ•จ์ˆ˜์ž…๋‹ˆ๋‹ค. result = drinks.groupby('continent').spirit_servings.agg(['mean', 'min', 'max', 'sum']) result.head() # ์ „์ฒด ํ‰๊ท ๋ณด๋‹ค ๋งŽ์€ ์•Œ์ฝ”์˜ฌ์„ ์„ญ์ทจํ•˜๋Š” ๋Œ€๋ฅ™์€ ์–ด๋””์ผ๊นŒ? total_mean = drinks.total_litres_of_pure_alcohol.mean() continent_mean = drinks.groupby('continent')['total_litres_of_pure_alcohol'].mean() continent_over_mean = continent_mean[continent_mean >= total_mean] print(continent_over_mean) # ํ‰๊ท  beer_servings๊ฐ€ ๊ฐ€์žฅ ๋†’์€ ๋Œ€๋ฅ™์„ ๊ตฌํ•ฉ๋‹ˆ๋‹ค. 
# idxmax()๋Š” ์‹œ๋ฆฌ์ฆˆ ๊ฐ์ฒด์—์„œ ๊ฐ’์ด ๊ฐ€์žฅ ํฐ index๋ฅผ ๋ฐ˜ํ™˜ํ•˜๋Š” ๊ธฐ๋Šฅ์„ ์ˆ˜ํ–‰ํ•œ๋‹ค beer_continent = drinks.groupby('continent').beer_servings.mean().idxmax() print(beer_continent) # + # ๋Œ€๋ฅ™๋ณ„ spirit_servings์˜ ํ‰๊ท , ์ตœ์†Œ, ์ตœ๋Œ€, ํ•ฉ๊ณ„๋ฅผ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. n_groups = len(result.index) means = result['mean'].tolist() mins = result['min'].tolist() maxs = result['max'].tolist() sums = result['sum'].tolist() index = np.arange(n_groups) bar_width = 0.1 rects1 = plt.bar(index, means, bar_width, color = 'r', label='Mean') rects2 = plt.bar(index + bar_width, mins, bar_width, color = 'g', label = 'Min') rects3 = plt.bar(index + bar_width * 2, maxs, bar_width, color = 'b', label = 'Max') rects4 = plt.bar(index + bar_width * 3, sums, bar_width, color = 'y', label = 'Sum') plt.xticks(index, result.index.tolist()) plt.legend() plt.show() # + # ๋Œ€๋ฅ™๋ณ„ total_litres_of_pure_alcohol์„ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. continents = continent_mean.index.tolist() continents.append('mean') x_pos = np.arange(len(continents)) alcohol = continent_mean.tolist() alcohol.append(total_mean) bar_list = plt.bar(x_pos, alcohol, align='center', alpha=0.5) bar_list[len(continents) - 1].set_color('r') plt.plot([0., 6], [total_mean, total_mean], "k--") # ํ‹ฑ (Tick)์€ ๊ทธ๋ž˜ํ”„์˜ ์ถ•์— ๊ฐ„๊ฒฉ์„ ๊ตฌ๋ถ„ํ•˜๊ธฐ ์œ„ํ•ด ํ‘œ์‹œํ•˜๋Š” ๋ˆˆ๊ธˆ์ž…๋‹ˆ๋‹ค. # xticks(), yticks() ํ•จ์ˆ˜๋Š” ๊ฐ๊ฐ x์ถ•, y์ถ•์— ๋ˆˆ๊ธˆ์„ ํ‘œ์‹œํ•ฉ๋‹ˆ๋‹ค. plt.xticks(x_pos, continents) plt.ylabel('total_litres_of_pure_alcohol') plt.title('total_litres_of_pure_alcohol by Continent') plt.show() # + # ๋Œ€๋ฅ™๋ณ„ beer_servings์„ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. 
beer_group = drinks.groupby('continent')['beer_servings'].sum() continents = beer_group.index.tolist() y_pos = np.arange(len(continents)) alcohol = beer_group.tolist() bar_list = plt.bar(y_pos, alcohol, align='center', alpha=0.5) bar_list[continents.index("EU")].set_color('r') plt.xticks(y_pos, continents) plt.ylabel('beer_servings') plt.title('beer_servings by Continent') plt.show() # + # ๋ถ„์„ ๊ฒฐ๊ณผ์— ํƒ€๋‹น์„ฑ์„ ๋ถ€์—ฌํ•˜๊ธฐ ์œ„ํ•ด t-test๋ฅผ ํ†ตํ•ด ํ†ต๊ณ„์ ์œผ๋กœ ์ฐจ์ด๋ฅผ ๊ฒ€์ •ํ•œ๋‹ค. # ์•„ํ”„๋ฆฌ์นด์™€ ์œ ๋Ÿฝ ๊ฐ„์˜ ๋งฅ์ฃผ ์†Œ๋น„๋Ÿ‰ ์ฐจ์ด๋ฅผ ๊ฒ€์ •ํ•ฉ๋‹ˆ๋‹ค. # loc[] ์†์„ฑ์€ ์ธ๋ฑ์Šค๋ฅผ ํ†ตํ•ด ํ–‰ ๋ฐ์ดํ„ฐ๋ฅผ ๊ฐ€์ ธ์˜ต๋‹ˆ๋‹ค. africa = drinks.loc[drinks['continent']=='AF'] europe = drinks.loc[drinks['continent']=='EU'] from scipy import stats # ttest_ind : ๋‘ ๊ฐœ์˜ ๋…๋ฆฝ์ ์ธ ํ‘œ๋ณธ(ndarray), a, b๋ฅผ ์ง€์ •ํ•ด ์ฃผ๋ฉด, ํ•ด๋‹น ํ•จ์ˆ˜๋ฅผ ํ†ตํ•ด t-test ๊ฒ€์ •์ด ์ž๋™์œผ๋กœ ๊ณ„์‚ฐ๋ฉ๋‹ˆ๋‹ค. tTestResult = stats.ttest_ind(africa['beer_servings'], europe['beer_servings']) # equal_var: ๋‘ ์ง‘๋‹จ์˜ variance๊ฐ€ ๊ฐ™์€์ง€, ๋‹ค๋ฅธ์ง€๋ฅผ ์ธก์ •ํ•จ. True์ผ ๊ฒฝ์šฐ๋Š” ๊ฐ™๋‹ค๊ณ , False์ผ ๊ฒฝ์šฐ์—๋Š” ๋‹ค๋ฅด๋‹ค๊ณ  ํ•˜๋ฉฐ, ๋‹ค๋ฅธ ํ…Œ์ŠคํŠธ๋ฅผ ์ˆ˜ํ–‰ํ•จ. tTestResultDiffVar = stats.ttest_ind(africa['beer_servings'], europe['beer_servings'], equal_var = False) #๋‘ ๋ณ€์ˆ˜์˜ ๊ฐ’์€ (statistic=-0.*, pvalue=0.*) ์™€ ๊ฐ™์€ ํ˜•์‹์œผ๋กœ ์ง€์ •๋œ๋‹ค print("The t-statistic and p-value assuming equal variances is %.3f and %.3f."% tTestResult) print("The t-statistic and p-value assuming equal variances is %.3f and %.3f."% tTestResultDiffVar) # + # total_servings ํ”ผ์ฒ˜๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. drinks['total_servings'] = drinks['beer_servings'] + drinks['wine_servings'] + drinks['spirit_servings'] # ์ˆ  ์†Œ๋น„๋Ÿ‰ ๋Œ€๋น„ ์•Œ์ฝ”์˜ฌ ๋น„์œจ ํ”ผ์ฒ˜๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. drinks['alcohol_rate'] = drinks['total_litres_of_pure_alcohol'] / drinks['total_servings'] drinks['alcohol_rate'] = drinks['alcohol_rate'].fillna(0) # ์ˆœ์œ„ ์ •๋ณด๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. 
country_with_rank = drinks[['country', 'alcohol_rate']] country_with_rank = country_with_rank.sort_values(by=['alcohol_rate'], ascending=0) country_with_rank.head(5) # + # ๊ตญ๊ฐ€๋ณ„ ์ˆœ์œ„ ์ •๋ณด๋ฅผ ๊ทธ๋ž˜ํ”„๋กœ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. country_list = country_with_rank.country.tolist() x_pos = np.arange(len(country_list)) rank = country_with_rank.alcohol_rate.tolist() bar_list = plt.bar(x_pos, rank) bar_list[country_list.index("South Korea")].set_color('r') plt.ylable('alcohol rate') plt.title('liquor drink rank by country') # axis([xmin, xmax, ymin, ymax]) # x,y ์ถ•์˜ ๋ฒ”์œ„ ์„ค์ • plt.axis([0, 200, 0, 0.3]) korea_rank = country_list.index("South Korea") korea_alc_rate = country_with_rank[country_with_rank['country'] == 'South Korea']['alcohol_rate'].values[0] """ ์–ด๋…ธํ…Œ์ด์…˜์ด๋ผ๋Š” ๊ธฐ๋Šฅ์€ ๊ทธ๋ž˜ํ”„์— ํ™”์‚ดํ‘œ๋ฅผ ๊ทธ๋ฆฐํ›„, ๊ทธ ํ™”์‚ดํ‘œ์— ๋ฌธ์ž์—ด์„ ์ถœ๋ ฅํ•˜๋Š” ๊ธฐ๋Šฅ์ด๋‹ค. ์˜ˆ๋ฅผ๋“ค์–ด โ€œ์ด๊ฐ’์ด ์ตœ์†Œ๊ฐ’" ์ด๋Ÿฐ์‹์œผ๋กœ ํ™”์‚ดํ‘œ๋ฅผ ๊ทธ๋ ค์„œ ํ‘œํ˜„ํ• ๋•Œ ์‚ฌ์šฉํ•˜๋Š”๋ฐ plt.annotate ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ๋œ๋‹ค. plt.annotate(โ€˜๋ฌธ์ž์—ด',xy,xytext,arrowprops) ์‹์œผ๋กœ ์‚ฌ์šฉํ•œ๋‹ค. ๋ฌธ์ž์—ด์€ ์–ด๋…ธํ…Œ์ด์…˜์—์„œ ๋‚˜ํƒ€๋‚ผ ๋ฌธ์ž์—ด์ด๊ณ , xy๋Š” ํ™”์‚ดํ‘œ๊ฐ€ ๊ฐ€๋ฅดํ‚ค๋Š” ์ ์˜ ์œ„์น˜, xytext๋Š” ๋ฌธ์ž์—ด์ด ์ถœ๋ ฅ๋  ์œ„์น˜, arrowprops๋Š” ํ™”์‚ดํ‘œ์˜ ์†์„ฑ์œผ๋กœ ์นผ๋ผ๋“ฑ์„ ์ •์˜ํ•œ๋‹ค. """ plt.annotate('South Korea :' + str(korea_rank + 1), xy=(korea_rank, korea_alc_rate), xytext=(korea_rank + 10, korea_alc_rate + 0.05), arrowprops=dict(facecolor='red', shrink=0.05)) plt.show() # -
chapter1/.ipynb_checkpoints/02-drunks-eda-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "-"} # # ๅ‚ๆ•ฐ็ฎก็† # # ๆˆ‘ไปฌ้ฆ–ๅ…ˆๅ…ณๆณจๅ…ทๆœ‰ๅ•้š่—ๅฑ‚็š„ๅคšๅฑ‚ๆ„Ÿ็Ÿฅๆœบ # + origin_pos=2 tab=["pytorch"] import mindspore import numpy as np from mindspore import nn from mindspore import Tensor net = nn.SequentialCell([nn.Dense(4, 8), nn.ReLU(), nn.Dense(8, 1)]) X = Tensor(np.random.rand(2, 4), mindspore.float32) net(X) # + [markdown] slideshow={"slide_type": "slide"} # ๅ‚ๆ•ฐ่ฎฟ้—ฎ # + origin_pos=6 tab=["pytorch"] print(net[2].parameters_dict()) # + [markdown] slideshow={"slide_type": "slide"} # ็›ฎๆ ‡ๅ‚ๆ•ฐ # + origin_pos=10 tab=["pytorch"] print(type(net[2].bias)) print(net[2].bias) print(net[2].bias.data) # + [markdown] slideshow={"slide_type": "slide"} # ไธ€ๆฌกๆ€ง่ฎฟ้—ฎๆ‰€ๆœ‰ๅ‚ๆ•ฐ # + origin_pos=17 tab=["pytorch"] print(*[(name, param.shape) for name, param in net[0].parameters_dict().items()]) print(*[(name, param.shape) for name, param in net.parameters_dict().items()]) # + origin_pos=21 tab=["pytorch"] net.parameters_dict()['2.bias'].data # + [markdown] slideshow={"slide_type": "slide"} # ไปŽๅตŒๅฅ—ๅ—ๆ”ถ้›†ๅ‚ๆ•ฐ # + origin_pos=25 tab=["pytorch"] def block1(): return nn.SequentialCell([nn.Dense(4, 8), nn.ReLU(), nn.Dense(8, 4), nn.ReLU()]) def block2(): net = nn.SequentialCell() for i in range(4): net.append(block1()) return net rgnet = nn.SequentialCell([block2(), nn.Dense(4, 1)]) rgnet(X) # + [markdown] slideshow={"slide_type": "slide"} # ๆˆ‘ไปฌๅทฒ็ป่ฎพ่ฎกไบ†็ฝ‘็ปœ๏ผŒ่ฎฉๆˆ‘ไปฌ็œ‹็œ‹ๅฎƒๆ˜ฏๅฆ‚ไฝ•็ป„็ป‡็š„ # + origin_pos=29 tab=["pytorch"] print(rgnet) # + origin_pos=33 tab=["pytorch"] rgnet[0][1][0].bias.data # + [markdown] slideshow={"slide_type": "slide"} # ๅ†…็ฝฎๅˆๅง‹ๅŒ– # + origin_pos=41 tab=["pytorch"] net = nn.SequentialCell([nn.Dense(4, 8, weight_init='normal', 
bias_init='zero'), nn.ReLU(), nn.Dense(8, 1, weight_init='normal', bias_init='zero')]) net[0].weight.data[0], net[0].bias.data[0] # + origin_pos=45 tab=["pytorch"] net = nn.SequentialCell([nn.Dense(4, 8, weight_init='one', bias_init='zero'), nn.ReLU(), nn.Dense(8, 1, weight_init='one', bias_init='zero')]) net[0].weight.data[0], net[0].bias.data[0] # + [markdown] slideshow={"slide_type": "slide"} # ๅฏนๆŸไบ›ๅ—ๅบ”็”จไธๅŒ็š„ๅˆๅง‹ๅŒ–ๆ–นๆณ• # + origin_pos=49 tab=["pytorch"] net = nn.SequentialCell([nn.Dense(4, 8, weight_init='xavier_uniform'), nn.ReLU(), nn.Dense(8, 1, weight_init=42)]) print(net[0].weight.data[0]) print(net[2].weight.data[0]) # + [markdown] slideshow={"slide_type": "slide"} # ่‡ชๅฎšไน‰ๅˆๅง‹ๅŒ– # + origin_pos=56 tab=["pytorch"] def my_init(shape): weight = np.random.uniform(-10, 10, shape) weight *= np.abs(weight) >= 5 return Tensor(weight, mindspore.float32) net = nn.SequentialCell([nn.Dense(4, 8, weight_init=my_init((8, 4))), nn.ReLU(), nn.Dense(8, 1, weight_init=my_init((1, 8)))]) net[0].weight[:2] # + origin_pos=60 tab=["pytorch"] net[0].weight.data[:] += 1 net[0].weight.data[0, 0] = 42 net[0].weight.data[0] # + [markdown] slideshow={"slide_type": "slide"} # ๅ‚ๆ•ฐ็ป‘ๅฎš # + origin_pos=65 tab=["pytorch"] shared = nn.Dense(8, 8) net = nn.SequentialCell([nn.Dense(4, 8), nn.ReLU(), shared, nn.ReLU(), shared, nn.ReLU(), nn.Dense(8, 1)]) net(X) print(net[2].weight.data[0] == net[4].weight.data[0]) net[2].weight.data[0, 0] = 100 print(net[2].weight.data[0] == net[4].weight.data[0])
chapter_05_deep-learning-computation/1_parameters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import nltk # %matplotlib inline # + from bs4 import BeautifulSoup import re from nltk.corpus import stopwords from sklearn import svm from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import accuracy_score, confusion_matrix, recall_score, precision_score, f1_score # - df = pd.read_csv("wiki_movie_plots_deduped.csv") df.tail() df.info() df['Genre']=df['Genre'].replace('unknown',np.nan) df=df.dropna(axis=0, subset=['Genre']) print(df.tail()) print(len(df)) print(df.shape) a=df['Genre'].value_counts()[:20] b=a.keys().tolist() print(b) df=df[df.Genre.isin(b)] df=df.reset_index(drop=True) sns.set(style="white") genre_to_count=pd.DataFrame({'Genre':a.index, 'Count':a.values}) plt.figure(figsize=(15,10)) sns.barplot(y="Genre", x="Count", data=genre_to_count,palette="Blues_d") # + def plotToWords(raw_plot): letters_only = re.sub("[^a-zA-Z]", " ", raw_plot) lower_case = letters_only.lower() words = lower_case.split() stops = set(stopwords.words("english")) meaningful_words = [w for w in words if not w in stops] return (" ".join(meaningful_words)) def preprocess(dataframe): clean_train_reviews = [] for i in range(0,len(dataframe)): clean_train_reviews.append(plotToWords(dataframe.iloc[i]['Plot'])) dataframe['Plot']=clean_train_reviews return dataframe df=preprocess(df) print(df["Plot"][:10]) # - from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), max_features=4000) features = tfidf.fit_transform(df.Plot).toarray() labels = df.Genre features.shape from 
sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB X_train, X_test, y_train, y_test = train_test_split(df['Plot'], df['Genre'], random_state = 0) count_vect = CountVectorizer() X_train_counts = count_vect.fit_transform(X_train) tfidf_transformer = TfidfTransformer() X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts) clf = MultinomialNB().fit(X_train_tfidf, y_train) print(clf.predict(count_vect.transform(["In an interview with CBC Radio, Universitรƒยฉ de Montrรƒยฉal History Professor <NAME> tells Diane about her new book, Variations on the Idea of Happiness, which discusses her thesis that modern society's fixation on self-indulgence is indicative of its decline, predicting a collapse in the ""American Empire,"" of which Quebec is on the periphery. Several of Dominique and Diane's friends, mostly intellectual history professors at the university, prepare for a dinner later in the day, with the men at work in the kitchen while the women work out at the gym. As the dinner draws nearer, the men and women mainly talk about their sex lives, with the men being open about their adulteries, including Rรƒยฉmy, who is married to Louise. Most of the women in the circle of friends have had sex with Rรƒยฉmy, though he is not attractive, but they conceal this from Louise to spare her feelings. Louise has been to an orgy with Rรƒยฉmy, but believes he is generally faithful to her in Montreal. The friends are also accepting of their homosexual friend Claude, who speaks about pursuing men reckless of fear of STDs, while secretly being fearful of having one. During the dinner party, the friends listen to Dominique's theories about the decline of society, with Louise expressing skepticism. 
To retaliate against Louise, Dominique reveals she has had sex with Rรƒยฉmy and their friend Pierre, causing her to have an emotional meltdown. By morning, relationships have gone back to normal."]))) from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.model_selection import cross_val_score models = [ LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0), ] CV = 5 cv_df = pd.DataFrame(index=range(CV * len(models))) entries = [] for model in models: model_name = model.__class__.__name__ accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV) for fold_idx, accuracy in enumerate(accuracies): entries.append((model_name, fold_idx, accuracy)) cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy']) cv_df.groupby('model_name').accuracy.mean()
imdbjup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/juanesoc/Instagram-API-python/blob/master/WHO_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="mlZ-X5UGwphU" import numpy as np import pandas as pd from datetime import date import matplotlib.pyplot as plt import time from IPython.display import clear_output import seaborn as sns # + id="PEN6lg7LwphX" colnames = ["Date_reported","Country_code","Country","WHO_region","New_cases","Cumulative_cases","New_deaths","Cumulative_deaths"] # + id="pts8kYF9wphX" dfdata = pd.read_csv("https://covid19.who.int/WHO-COVID-19-global-data.csv",encoding='utf8', low_memory=False,header = 1, names = colnames) # + id="eBwPVhzXwphY" dfdata['Date_reported'] = pd.to_datetime(dfdata['Date_reported'], errors='coerce') # + colab={"base_uri": "https://localhost:8080/"} id="sxwsTcZcwphY" outputId="ebdafd04-88de-47aa-ff5e-171040938ccb" column = dfdata["Date_reported"] max_value = column.max() min_value = column.min() print("Today's date:", date.today()) print(min_value) print(max_value) # + colab={"base_uri": "https://localhost:8080/"} id="sWi1dPFywphZ" outputId="595377fb-99bf-43e6-97d0-f7d3cd046323" dfdata.info() # + colab={"base_uri": "https://localhost:8080/"} id="jo7OD6qV0I5T" outputId="f5d8a384-1a34-4dad-fc33-108087bca6ff" Paises = dfdata.Country.unique() print(Paises) # + colab={"base_uri": "https://localhost:8080/"} id="q4H3U_fTwpha" outputId="064822ba-d2fe-42b6-8831-8b663104aec4" #dfdata column = dfdata["Date_reported"] max_value = column.max() print(max_value) print(column.count()) # + id="f8xe2Xvewpha" columnas = 
["Date_reported","Country_code","Country","WHO_region","New_deaths","Cumulative_deaths","New_cases","Cumulative_cases"] # + [markdown] id="WHFlIoFjviwM" # # **Colombia** # + id="XFsobYdQwphd" dfCol = dfdata[dfdata['Country']== 'Colombia'] # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="fGdiU0Qa00Fh" outputId="0acb86e3-6005-4543-b699-d8f6e1d7b14e" dfCol # + id="oO6XfWgLwphd" dfColDeath = dfCol.groupby('Date_reported')['New_deaths'].sum() dfCol = dfCol.groupby('Date_reported')['New_cases'].sum() # + colab={"base_uri": "https://localhost:8080/", "height": 287} id="IViLCxHC1AOG" outputId="38e6bfb9-7d45-46e9-cc76-feed8215f307" plt.style.use('seaborn-whitegrid') dfCol.plot(kind = 'line') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 287} id="0VLxX9SV1NpV" outputId="d3505311-c37f-44db-ec27-7207002132e0" plt.style.use('seaborn-whitegrid') dfColDeath.plot(kind = 'line', color='red') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="9JvMHTzR1_M1" outputId="092f534d-21d2-4359-a27b-8c71ba53db1c" plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfColDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfCol, color='blue') # + colab={"base_uri": "https://localhost:8080/", "height": 276} id="GTRzp2HOGEQk" outputId="8ef7f7d1-d3cc-4afa-b406-e103729f2f54" dfColDeath2 = dfColDeath.to_frame().reset_index() dfColDeath2 = dfColDeath2.drop(columns=['Date_reported']) dfColDeath2 = dfColDeath2.reset_index() # Genera un diagrama de linea plt.plot(dfColDeath2['index'], dfColDeath2['New_deaths'], label='data', color='red') # Trazar en verde una regresiรณn lineal de orden X sns.regplot(x='index', y='New_deaths', data=dfColDeath2, scatter=None, order=6, color='green', label='order 2') plt.show() # + [markdown] id="S7vbyU3mvcrh" # # **Alemania** # + id="PA64dIZIu0NY" dfGer = 
dfdata[dfdata['Country']== 'Germany'] # + id="ZUlCFp-Su9ZP" dfGerDeath = dfGer.groupby('Date_reported')['New_deaths'].sum() dfGer = dfGer.groupby('Date_reported')['New_cases'].sum() # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="pVQFBheSvJSV" outputId="07bc0b99-7bd1-44d3-c73a-244dfcc551ec" plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfGerDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfGer, color='blue') # + [markdown] id="4n0j1ABIyz58" # # **Espaรฑa** # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="tUk1Q1Ojy3Z0" outputId="f2145417-d5d3-44d8-e348-6db9cee907ca" dfEs = dfdata[dfdata['Country']== 'Spain'] dfEsDeath = dfEs.groupby('Date_reported')['New_deaths'].sum() dfEs = dfEs.groupby('Date_reported')['New_cases'].sum() plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfEsDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfEs, color='blue') # + [markdown] id="Z6a-BiyazYAD" # # **Estados Unidos** # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="JNXebeRazXJe" outputId="3cd2c4f8-4882-47ed-bb85-a88cf1c99d68" dfUS = dfdata[dfdata['Country_code']== 'US'] dfUSDeath = dfUS.groupby('Date_reported')['New_deaths'].sum() dfUS = dfUS.groupby('Date_reported')['New_cases'].sum() plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfUSDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfUS, color='blue') # + [markdown] id="QFRyJnoa0Stl" # # **Argentina** # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="P51rVvUN0Wki" outputId="8a248cc1-5dac-45a4-82cd-466c7f67586e" dfAr = dfdata[dfdata['Country']== 'Argentina'] dfArDeath = 
dfAr.groupby('Date_reported')['New_deaths'].sum() dfAr = dfAr.groupby('Date_reported')['New_cases'].sum() plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfArDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfAr, color='blue') # + [markdown] id="B4FGz2c27toL" # # **Italia** # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="l7zVO3To7st-" outputId="57d1ca7c-01bc-4bee-d796-a07b0580b185" dfIt = dfdata[dfdata['Country']== 'Italy'] dfItDeath = dfIt.groupby('Date_reported')['New_deaths'].sum() dfIt = dfIt.groupby('Date_reported')['New_cases'].sum() plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfItDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfIt, color='blue') # + [markdown] id="KtGUzqOn011z" # # **Grafico por paises** # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="kZGT6vbZ0033" outputId="fa88f961-b602-45b4-c02d-b823b0ca726b" x = 0 for i in Paises: clear_output(wait=True) x = x + 1 print(str(x) + ' ' + i) dfCount = dfdata[dfdata['Country']== i] dfCountDeath = dfCount.groupby('Date_reported')['New_deaths'].sum() dfCount = dfCount.groupby('Date_reported')['New_cases'].sum() plt.style.use('seaborn-whitegrid') fig, ax_left = plt.subplots() ax_right = ax_left.twinx() ax_left.set_ylabel('Muertos', color='red') ax_left.plot(dfCountDeath, color='red') ax_right.set_ylabel('Casos', color='blue') ax_right.plot(dfCount, color='blue') plt.show() time.sleep(1) # + id="BA-fj-JoCP9A" # + colab={"base_uri": "https://localhost:8080/"} id="6bUP9-H3E7H4" outputId="b9586bb9-a708-4ba6-c7c6-334a00dfe039" dfColDeath2['index']
WHO_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + cellView="form" colab={} colab_type="code" id="zh8WOre17mel" #@title ##### License # Copyright 2018 The GraphNets Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # + [markdown] colab_type="text" id="rPPRpGpz1Hce" # ![](https://github.com/deepmind/graph_nets/raw/master/images/graph-nets-deepmind-shortest-path.gif) # # # Tutorial of the Graph Nets library # # The purpose of this tutorial is to get practical experience using the Graph Nets library via examples of: # 1. Building graph data structures `graph_nets.graphs.GraphsTuple` using `graph_nets.utils_np` . # 2. Operating with graph data structures in the tensorflow graph using `graph_nets.utils_tf` . # 3. Feeding graphs to Graph Nets tensorflow modules in `graph_nets.modules` . # 4. Building custom Graph Nets modules using the graph net building blocks provided in `graph_nets.blocks` . # # For more information about graph networks, see our arXiv paper: [Relational inductive biases, deep learning, and graph networks](https://arxiv.org/abs/1806.01261). 
# + cellView="form" colab={} colab_type="code" id="bq24M0Ig7r8N" #@title ### Install the Graph Nets library on this Colaboratory runtime { form-width: "60%", run: "auto"} #@markdown <br>1. Connect to a local or hosted Colaboratory runtime by clicking the **Connect** button at the top-right.<br>2. Choose "Yes" below to install the Graph Nets library on the runtime machine with the correct dependencies. Note, this works both with local and hosted Colaboratory runtimes. install_graph_nets_library = "No" #@param ["Yes", "No"] if install_graph_nets_library.lower() == "yes": print("Installing Graph Nets library and dependencies:") print("Output message from command:\n") # !pip install graph_nets "dm-sonnet<2" "tensorflow_probability<0.9" else: print("Skipping installation of Graph Nets library") # + [markdown] colab_type="text" id="1S-8wRohRDE2" # ### Install dependencies locally # # If you are running this notebook locally (i.e., not through Colaboratory), you will also need to install a few more dependencies. Run the following on the command line to install the graph networks library, as well as a few other dependencies: # # ``` # pip install graph_nets matplotlib scipy "tensorflow>=1.15,<2" "dm-sonnet<2" "tensorflow_probability<0.9" # + cellView="form" colab={} colab_type="code" id="ww1nT7Ddydrk" #@title #### (Imports) # %tensorflow_version 1.x # For Google Colab only. 
from __future__ import absolute_import from __future__ import division from __future__ import print_function from graph_nets import blocks from graph_nets import graphs from graph_nets import modules from graph_nets import utils_np from graph_nets import utils_tf import matplotlib.pyplot as plt import networkx as nx import numpy as np import sonnet as snt import tensorflow as tf # + [markdown] colab_type="toc" id="PbT9ciFt4vGk" # >[Tutorial of the Graph Nets library](#scrollTo=rPPRpGpz1Hce) # # >[A Graph Net module](#scrollTo=jS0bIZykbaw4) # # >>[The graphs.GraphsTuple class](#scrollTo=Tc355RxEak3h) # # >[Creating graphs](#scrollTo=Rg3HRdEAyYi0) # # >>[What's contained in a graph?](#scrollTo=VC023vAp0pAp) # # >>[How to represent graphs as a graphs.GraphsTuple](#scrollTo=8sOteecpHj0Z) # # >>>[Visualize the graphs using networkx](#scrollTo=saTlW6FUsQou) # # >>>[Print the GraphsTuple fields](#scrollTo=o-HixEQ7sU6q) # # >>[Different ways of expressing data as a graph](#scrollTo=JtUYYeERq2x2) # # >>>[Graph with no features](#scrollTo=nz7RDJL1r8Mh) # # >>>[Set (ie. 
graph without edges)](#scrollTo=efQtkXoFr5su) # # >>>[Creating a GraphsTuple from a networkx graph](#scrollTo=oPrZFvcRr1yU) # # >>[Working with tensor GraphsTuple's](#scrollTo=cSUY6pbYXE1i) # # >>[Creating a constant tensor GraphsTuple from data dicts](#scrollTo=_kygzbsSXsHJ) # # >>[GraphsTuple placeholders](#scrollTo=lQVz4xCbXr_J) # # >>[Slicing graphs from within a batch](#scrollTo=acSavUGPXsZI) # # >>[Concatenating GraphsTuples](#scrollTo=MLD2KEdEYldQ) # # >[Graph Net modules](#scrollTo=VUzXQDdRaIKt) # # >>[Creating a modules.GraphNetwork](#scrollTo=0rOU3rjRbK9O) # # >>[Feeding a GraphsTuple to a Graph Net](#scrollTo=VsSIrvMEbV1W) # # >>[Connecting a GraphNetwork recurrently](#scrollTo=HGK5xGBob3lb) # # >[Graph Net building blocks](#scrollTo=qfhlV2qpah5O) # # >>[Broadcast operations](#scrollTo=Ta0YenqKaq7E) # # >>[Aggregators](#scrollTo=uE8bBiLbaoCX) # # >>[blocks.EdgeBlock](#scrollTo=lE0e3bMna96z) # # >>[blocks.NodeBlock](#scrollTo=FsqdEWjLa0kF) # # >>[blocks.GlobalBlock](#scrollTo=JlNdE_rbbBOj) # # >>[Block compositionality](#scrollTo=ZC_Y6dEZqNCx) # # >[Various canonical Graph Net modules](#scrollTo=n4gULrAFbnwE) # # >>[Independent Graph Net (modules.GraphIndependent)](#scrollTo=bHhltBuVv5hR) # # >>[Message-passing neural networks (modules.InteractionNetwork, modules.CommNet)](#scrollTo=bJX9iMMIt8T9) # # >>[Non-local neural networks (modules.SelfAttention)](#scrollTo=XMCMOdkNvJ58) # # >>[Relation network (modules.RelationNetwork)](#scrollTo=bJAx7bZ5vM-e) # # >>[Deep Sets (modules.DeepSets)](#scrollTo=ncp-swtyvQ0C) # # # + [markdown] colab_type="text" id="Tc355RxEak3h" # ## The [`graphs.GraphsTuple`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-graphsgraphstuple) class # # The Graph Nets library contains models which operate on graph-structured data, so the first thing to understand is how graph-structured data is represented in the code. 
#
# The `graph_nets.graphs.GraphsTuple` class, defined in [`graph_nets/graphs.py`](https://github.com/deepmind/graph_nets/blob/master/graph_nets/graphs.py), represents a **batch of one or more graphs**. All graph network modules take instances of `GraphsTuple` as input, and return instances of `GraphsTuple` as output. The graphs are *directed* (one-way edges), *attributed* (node-, edge-, and graph-level features are allowed), *multigraphs* (multiple edges can connect any two nodes, and self-edges are allowed). See Box 3, page 11 in [our companion arXiv paper](https://arxiv.org/pdf/1806.01261.pdf) for details.
#
# A `GraphsTuple` has attributes:
# - `n_node` (shape=[num_graphs]): Number of nodes in each graph in the batch.
# - `n_edge` (shape=[num_graphs]): Number of edges in each graph in the batch.
# - `globals` (shape=[num_graphs] + global_feature_dimensions): Global features for each graph in the batch.
# - `nodes` (shape=[total_num_nodes] + node_feature_dimensions): Node features for each node in the batch of graphs.
# - `edges` (shape=[total_num_edges] + edge_feature_dimensions): Edge features for each edge in the batch of graphs.
# - `senders` (shape=[total_num_edges]): Indices of the nodes in `nodes`, which indicate the source node of each directed edge in `edges`.
# - `receivers` (shape=[total_num_edges]): Indices of the nodes in `nodes`, which indicate the destination node of each directed edge in `edges`.
#
# The nodes and edges from the different graphs in the batch are concatenated along the first axis of the `nodes` and `edges` fields, and can be partitioned using the `n_node` and `n_edge` fields respectively. Note, all but the "`n_*`" fields are optional (see examples below).
#
# The attributes of a `GraphsTuple` instance are typically either Numpy arrays or TensorFlow tensors.
The library contains utilities for manipulating graphs with each of these types of attributes, respectively: # * `utils_np` (for Numpy arrays) # * `utils_tf` (for TensorFlow tensors) # # An important method of the `GraphsTuple` class is [`GraphsTuple.replace`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#graphsgraphstuplereplacekwargs): Similarly to `collections.namedtuple._replace` (in fact, `GraphsTuple` is sub-class of `collections.namedtuple`), this method creates a copy of the `GraphsTuple`, with references to all of the original attributes, by replacing some of them by the values provided as keyword arguments. # + [markdown] colab_type="text" id="Rg3HRdEAyYi0" # # Creating graphs # + [markdown] colab_type="text" id="VC023vAp0pAp" # ## What's contained in a graph? # # Each graph will have a global feature, several nodes, and several edges. The graphs can have different numbers of nodes and edges, but the lengths of the global, node, and edge attribute vectors must be the same across graphs. # In order to create a `graphs.GraphsTuple` instance, we can define a `list` whose elements are `dict`s, with the following keys, that contain each graph's data: # - "globals": Each graph has a single `float`-valued feature vector. # - "nodes": Each graph has a set of nodes with `float`-valued feature vectors. # - "edges": Each graph has a set of edges with `float`-valued feature vectors. # - "senders": Each edge connects a sender node, represented by an `int`-valued node index, to a receiver node. # - "receivers": Each edge connects a sender node to a receiver node, represented by an `int`-valued node index. # # Try running the cell below to create some dummy graph data. # + cellView="both" colab={} colab_type="code" id="jW6MIo92HgOR" # Global features for graph 0. globals_0 = [1., 2., 3.] # Node features for graph 0. 
nodes_0 = [[10., 20., 30.], # Node 0 [11., 21., 31.], # Node 1 [12., 22., 32.], # Node 2 [13., 23., 33.], # Node 3 [14., 24., 34.]] # Node 4 # Edge features for graph 0. edges_0 = [[100., 200.], # Edge 0 [101., 201.], # Edge 1 [102., 202.], # Edge 2 [103., 203.], # Edge 3 [104., 204.], # Edge 4 [105., 205.]] # Edge 5 # The sender and receiver nodes associated with each edge for graph 0. senders_0 = [0, # Index of the sender node for edge 0 1, # Index of the sender node for edge 1 1, # Index of the sender node for edge 2 2, # Index of the sender node for edge 3 2, # Index of the sender node for edge 4 3] # Index of the sender node for edge 5 receivers_0 = [1, # Index of the receiver node for edge 0 2, # Index of the receiver node for edge 1 3, # Index of the receiver node for edge 2 0, # Index of the receiver node for edge 3 3, # Index of the receiver node for edge 4 4] # Index of the receiver node for edge 5 # Global features for graph 1. globals_1 = [1001., 1002., 1003.] # Node features for graph 1. nodes_1 = [[1010., 1020., 1030.], # Node 0 [1011., 1021., 1031.]] # Node 1 # Edge features for graph 1. edges_1 = [[1100., 1200.], # Edge 0 [1101., 1201.], # Edge 1 [1102., 1202.], # Edge 2 [1103., 1203.]] # Edge 3 # The sender and receiver nodes associated with each edge for graph 1. 
senders_1 = [0, # Index of the sender node for edge 0 0, # Index of the sender node for edge 1 1, # Index of the sender node for edge 2 1] # Index of the sender node for edge 3 receivers_1 = [0, # Index of the receiver node for edge 0 1, # Index of the receiver node for edge 1 0, # Index of the receiver node for edge 2 0] # Index of the receiver node for edge 3 data_dict_0 = { "globals": globals_0, "nodes": nodes_0, "edges": edges_0, "senders": senders_0, "receivers": receivers_0 } data_dict_1 = { "globals": globals_1, "nodes": nodes_1, "edges": edges_1, "senders": senders_1, "receivers": receivers_1 } # + [markdown] colab_type="text" id="8sOteecpHj0Z" # ## How to represent graphs as a [`graphs.GraphsTuple`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#utils_npgraphs_tuple_to_data_dictsgraph) # # The `utils_np` module contains a functions named [`utils_np.data_dicts_to_graphs_tuple`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#utils_npgraphs_tuple_to_data_dictsgraph), which takes a `list` of `dict`s with the keys specified above, and returns a `GraphsTuple` that represents the sequence of graphs. # # The `data_dicts_to_graphs_tuple` function does three things: # 1. It concatenates the data from the multiple graphs together along their innermost axes (i.e. the batch dimension). This allows the graph net to process the node and edge attributes by a shared function in parallel. # 2. It counts up the numbers of nodes and edges per graph, and stores them in the fields "n_node" and "n_edge", respectively, where their lengths equal the number of graphs. This is used for keeping track of which nodes and edges belong to which graph, so they can be split up later, and so the graph can broadcast a graph's global attributes across its nodes and edges. # 3. It adds an integer offset to the sender and receiver indices, which corresponds to the number of nodes in the preceding graphs. 
This allows the indices to correspond to the nodes and edges of their corresponding graph, after the node and edge attributes have been concatenated. # # Try running the cell below to put the graph dictionaries into a `GraphsTuple` using `utils_np.data_dicts_to_graphs_tuple`. # + colab={} colab_type="code" id="cpUOMJo30oHa" data_dict_list = [data_dict_0, data_dict_1] graphs_tuple = utils_np.data_dicts_to_graphs_tuple(data_dict_list) # + [markdown] colab_type="text" id="saTlW6FUsQou" # ### Visualize the graphs using `networkx` # # A `GraphsTuple` can be converted into a `list` of `networkx` graph objects for easy visualization. # # Try running the cell below to visualize the graphs we've just defined. # + cellView="both" colab={} colab_type="code" id="B-vAc0An9Dvu" graphs_nx = utils_np.graphs_tuple_to_networkxs(graphs_tuple) _, axs = plt.subplots(ncols=2, figsize=(6, 3)) for iax, (graph_nx, ax) in enumerate(zip(graphs_nx, axs)): nx.draw(graph_nx, ax=ax) ax.set_title("Graph {}".format(iax)) # + [markdown] colab_type="text" id="o-HixEQ7sU6q" # ### Print the `GraphsTuple` fields # # You can also print out the data contained in a `GraphsTuple` by running the cell below. 
# + colab={} colab_type="code" id="akIs5O2b9KOI" def print_graphs_tuple(graphs_tuple): print("Shapes of `GraphsTuple`'s fields:") print(graphs_tuple.map(lambda x: x if x is None else x.shape, fields=graphs.ALL_FIELDS)) print("\nData contained in `GraphsTuple`'s fields:") print("globals:\n{}".format(graphs_tuple.globals)) print("nodes:\n{}".format(graphs_tuple.nodes)) print("edges:\n{}".format(graphs_tuple.edges)) print("senders:\n{}".format(graphs_tuple.senders)) print("receivers:\n{}".format(graphs_tuple.receivers)) print("n_node:\n{}".format(graphs_tuple.n_node)) print("n_edge:\n{}".format(graphs_tuple.n_edge)) print_graphs_tuple(graphs_tuple) # + [markdown] colab_type="text" id="3TaUiWVh_SX9" # ### Back to data dicts # It is also possible to retrieve a `list` of graph `dict`s by using `utils_np.graphs_tuple_to_data_dicts`: # + colab={} colab_type="code" id="PRrsWIDK_Sm5" recovered_data_dict_list = utils_np.graphs_tuple_to_data_dicts(graphs_tuple) # + [markdown] colab_type="text" id="JtUYYeERq2x2" # ## Ways to represent different data sources with a graph # # As mentioned above, some graph properties are optional, and the following two cells show two instances of how this can be used. # + [markdown] colab_type="text" id="nz7RDJL1r8Mh" # ### Graph with no features # + colab={} colab_type="code" id="PlBpGYcx0Ux5" # Number of nodes n_node = 3 # Three edges connecting the nodes in a cycle senders = [0, 1, 2] # Indices of nodes sending the edges receivers = [1, 2, 0] # Indices of nodes receiving the edges data_dict = { "n_node": n_node, "senders": senders, "receivers": receivers, } graphs_tuple = utils_np.data_dicts_to_graphs_tuple([data_dict]) # + [markdown] colab_type="text" id="efQtkXoFr5su" # ### Set (ie. graph without edges) # + colab={} colab_type="code" id="J18qD3fU41OE" # Node features. 
nodes = [[10.],  # Node 0
         [11.],  # Node 1
         [12.]]  # Node 2

# A set is a graph with nodes only: no edges, no globals.
data_dict = {
    "nodes": nodes,
}

graphs_tuple = utils_np.data_dicts_to_graphs_tuple([data_dict])

# We can visualize the graph using networkx.
graphs_nx = utils_np.graphs_tuple_to_networkxs(graphs_tuple)
ax = plt.figure(figsize=(3, 3)).gca()
nx.draw(graphs_nx[0], ax=ax)
_ = ax.set_title("Graph without edges")

# + [markdown] colab_type="text" id="oPrZFvcRr1yU"
# ### Creating a `GraphsTuple` from a `networkx` graph
#
# [`networkx`](https://networkx.github.io/) is a powerful graph manipulation library in Python. A `GraphsTuple` can be built from `networkx` graphs as follows:

# + colab={} colab_type="code" id="ONEOf83YyTKp"
graph_nx = nx.OrderedMultiDiGraph()

# Globals.
graph_nx.graph["features"] = np.array([0.6, 0.7, 0.8])

# Nodes.
graph_nx.add_node(0, features=np.array([0.3, 1.3]))
graph_nx.add_node(1, features=np.array([0.4, 1.4]))
graph_nx.add_node(2, features=np.array([0.5, 1.5]))
graph_nx.add_node(3, features=np.array([0.6, 1.6]))

# Edges.
graph_nx.add_edge(0, 1, features=np.array([3.6, 3.7]))
graph_nx.add_edge(2, 0, features=np.array([5.6, 5.7]))
graph_nx.add_edge(3, 0, features=np.array([6.6, 6.7]))

ax = plt.figure(figsize=(3, 3)).gca()
nx.draw(graph_nx, ax=ax)
ax.set_title("Graph")

graphs_tuple = utils_np.networkxs_to_graphs_tuple([graph_nx])
print_graphs_tuple(graphs_tuple)

# + [markdown] colab_type="text" id="cSUY6pbYXE1i"
# ## Working with tensor `GraphsTuple`'s

# + cellView="both" colab={} colab_type="code" id="J7tlhBX1Z8RP"
#@title #### (Define functions for generating and plotting graphs)

GLOBAL_SIZE = 4
NODE_SIZE = 5
EDGE_SIZE = 6

def get_graph_data_dict(num_nodes, num_edges):
  """Returns a graph data dict with random features and random connectivity."""
  return {
      "globals": np.random.rand(GLOBAL_SIZE).astype(np.float32),
      "nodes": np.random.rand(num_nodes, NODE_SIZE).astype(np.float32),
      "edges": np.random.rand(num_edges, EDGE_SIZE).astype(np.float32),
      "senders": np.random.randint(num_nodes, size=num_edges, dtype=np.int32),
      "receivers": np.random.randint(num_nodes, size=num_edges, dtype=np.int32),
  }

graph_3_nodes_4_edges = get_graph_data_dict(num_nodes=3, num_edges=4)
graph_5_nodes_8_edges = get_graph_data_dict(num_nodes=5, num_edges=8)
graph_7_nodes_13_edges = get_graph_data_dict(num_nodes=7, num_edges=13)
graph_9_nodes_25_edges = get_graph_data_dict(num_nodes=9, num_edges=25)

graph_dicts = [graph_3_nodes_4_edges, graph_5_nodes_8_edges,
               graph_7_nodes_13_edges, graph_9_nodes_25_edges]

def plot_graphs_tuple_np(graphs_tuple):
  """Draws every graph in a numpy `GraphsTuple`, one subplot per graph."""
  networkx_graphs = utils_np.graphs_tuple_to_networkxs(graphs_tuple)
  num_graphs = len(networkx_graphs)
  _, axes = plt.subplots(1, num_graphs, figsize=(5*num_graphs, 5))
  if num_graphs == 1:
    # plt.subplots returns a bare Axes for a single subplot; wrap it for zip below.
    axes = axes,
  for graph, ax in zip(networkx_graphs, axes):
    plot_graph_networkx(graph, ax)

def plot_graph_networkx(graph, ax, pos=None):
  """Draws one networkx graph on `ax`, labelling nodes/edges/global with the
  first element of their feature vectors; returns the layout `pos` so that a
  subsequent call can reuse the same node positions."""
  node_labels = {node: "{:.3g}".format(data["features"][0])
                 for node, data in graph.nodes(data=True)
                 if data["features"] is not None}
  edge_labels = {(sender, receiver): "{:.3g}".format(data["features"][0])
                 for sender, receiver, data in graph.edges(data=True)
                 if data["features"] is not None}
  global_label = ("{:.3g}".format(graph.graph["features"][0])
                  if graph.graph["features"] is not None else None)

  if pos is None:
    pos = nx.spring_layout(graph)
  nx.draw_networkx(graph, pos, ax=ax, labels=node_labels)

  if edge_labels:
    nx.draw_networkx_edge_labels(graph, pos, edge_labels, ax=ax)

  if global_label:
    plt.text(0.05, 0.95, global_label, transform=ax.transAxes)

  ax.yaxis.set_visible(False)
  ax.xaxis.set_visible(False)
  return pos

def plot_compare_graphs(graphs_tuples, labels):
  """Draws the first graph of each `GraphsTuple` side by side with a shared
  layout, so the same graph can be compared before/after an operation."""
  pos = None
  num_graphs = len(graphs_tuples)
  _, axes = plt.subplots(1, num_graphs, figsize=(5*num_graphs, 5))
  if num_graphs == 1:
    axes = axes,
  # NOTE(review): redundant reassignment, pos is already None at this point.
  pos = None
  for name, graphs_tuple, ax in zip(labels, graphs_tuples, axes):
    graph = utils_np.graphs_tuple_to_networkxs(graphs_tuple)[0]
    pos = plot_graph_networkx(graph, ax, pos=pos)
    ax.set_title(name)

# + [markdown] colab_type="text" id="_kygzbsSXsHJ"
# ## Creating a constant tensor `GraphsTuple` from data dicts
#
# Similar to `utils_np.data_dicts_to_graphs_tuple`, the `utils_tf` module, which manipulates graphs whose attributes are represented as TensorFlow tensors, contains a function named [`utils_tf.data_dicts_to_graphs_tuple`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#utils_tfgraphs_tuple_to_data_dictsgraph), which creates a constant tensor graph from data dicts, containing either numpy arrays or tensors.

# + colab={} colab_type="code" id="Yf8d8qV2gQV8"
tf.reset_default_graph()
graphs_tuple_tf = utils_tf.data_dicts_to_graphs_tuple(graph_dicts)

with tf.Session() as sess:
  graphs_tuple_np = sess.run(graphs_tuple_tf)

plot_graphs_tuple_np(graphs_tuple_np)

# + colab={} colab_type="code" id="6ufxDR3vpuG5"
# If the GraphsTuple has None's we need to make use of `utils_tf.make_runnable_in_session`.
tf.reset_default_graph()
graphs_tuple_tf = utils_tf.data_dicts_to_graphs_tuple(graph_dicts)

# Removing the edges from a graph.
graph_with_nones = graphs_tuple_tf.replace(
    edges=None, senders=None, receivers=None,
    # Zero out the edge counts to stay consistent with the removed edge fields.
    n_edge=graphs_tuple_tf.n_edge*0)
runnable_in_session_graph = utils_tf.make_runnable_in_session(graph_with_nones)
with tf.Session() as sess:
  graphs_tuple_np = sess.run(runnable_in_session_graph)

plot_graphs_tuple_np(graphs_tuple_np)

# + [markdown] colab_type="text" id="lQVz4xCbXr_J"
# ## `GraphsTuple` placeholders
#
# In TensorFlow, data is often passed into a session via [placeholder tensors](https://www.tensorflow.org/api_docs/python/tf/placeholder). The cell below shows how to create placeholders for graph data.

# + colab={} colab_type="code" id="q46yrk1wuAxN"
tf.reset_default_graph()

# Create a placeholder using the first graph in the list as template.
graphs_tuple_ph = utils_tf.placeholders_from_data_dicts(graph_dicts[0:1])

with tf.Session() as sess:
  # Feeding a batch of graphs with different sizes, and different
  # numbers of nodes and edges through the placeholder.
  feed_dict = utils_tf.get_feed_dict(
      graphs_tuple_ph, utils_np.data_dicts_to_graphs_tuple(graph_dicts[1:]))

  graphs_tuple_np = sess.run(graphs_tuple_ph, feed_dict)

plot_graphs_tuple_np(graphs_tuple_np)

# + [markdown] colab_type="text" id="UuBFLcpgvgLL"
# A similar utility is provided to work with `networkx` graphs: [`utils_np.data_dict_to_networkx`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#utils_npdata_dict_to_networkxdata_dict).

# + [markdown] colab_type="text" id="acSavUGPXsZI"
# ## Slicing graphs from within a batch
#
# A subset of graphs can be retrieved from a batch represented by `GraphsTuple` as follows.

# + colab={} colab_type="code" id="63OYmvUxZBWn"
# `utils_tf.get_graph` selects a single graph (int index) or a sub-batch (slice) from a batched GraphsTuple.
tf.reset_default_graph() graphs_tuple_tf = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) first_graph_tf = utils_tf.get_graph(graphs_tuple_tf, 0) three_graphs_tf = utils_tf.get_graph(graphs_tuple_tf, slice(1, 4)) with tf.Session() as sess: first_graph_np = sess.run(first_graph_tf) three_graphs_np = sess.run(three_graphs_tf) plot_graphs_tuple_np(first_graph_np) plot_graphs_tuple_np(three_graphs_np) # + [markdown] colab_type="text" id="MLD2KEdEYldQ" # ## Concatenating multiple `GraphsTuple` instances # + colab={} colab_type="code" id="MrWouGJHxeIO" # Concatenating along the batch dimension tf.reset_default_graph() graphs_tuple_1_tf = utils_tf.data_dicts_to_graphs_tuple(graph_dicts[0:1]) graphs_tuple_2_tf = utils_tf.data_dicts_to_graphs_tuple(graph_dicts[1:]) graphs_tuple_tf = utils_tf.concat([graphs_tuple_1_tf, graphs_tuple_2_tf], axis=0) with tf.Session() as sess: graphs_tuple_np = sess.run(graphs_tuple_tf) plot_graphs_tuple_np(graphs_tuple_np) # + [markdown] colab_type="text" id="btcMDrixx9Bs" # Similarly, we can concatenate along feature dimensions, assuming all of the batches to be concatenates have the same graph structure/connectivity. # + [markdown] colab_type="text" id="_KzQhLJJYEQZ" # See `utils_tf` for more methods to work with GraphsTuple's containing tensors. # + [markdown] colab_type="text" id="VUzXQDdRaIKt" # # Graph Net modules # # So far we've covered how to manipulate graph-structured data via the `GraphsTuple` class and the `utils_np` and `utils_tf` utilities. Now we show how to use actual graph networks. # + [markdown] colab_type="text" id="0rOU3rjRbK9O" # ## Creating a [`modules.GraphNetwork`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesgraphnetwork) # # ![Graph Network block](https://github.com/deepmind/graph_nets/raw/master/images/graph-network.png) # # A graph network has up to three learnable sub-functions: edge ($\phi^e$), node ($\phi^v$), and global ($\phi^u$) in the schematic above. 
See Section 3.2.2, page 12 in [our companion arXiv paper](https://arxiv.org/pdf/1806.01261.pdf) for details. # # To instantiate a graph network module in the library, these sub-functions are specified via constructor arguments which are `callable`s that return Sonnet modules, such as `snt.Linear` or `snt.nets.MLP`. # # The reason that a `callable` is provided, instead of the module/method directly, is so the Graph Net object owns the modules and the variables created by them. # + colab={} colab_type="code" id="P5_ADeDsyQNZ" tf.reset_default_graph() OUTPUT_EDGE_SIZE = 10 OUTPUT_NODE_SIZE = 11 OUTPUT_GLOBAL_SIZE = 12 graph_network = modules.GraphNetwork( edge_model_fn=lambda: snt.Linear(output_size=OUTPUT_EDGE_SIZE), node_model_fn=lambda: snt.Linear(output_size=OUTPUT_NODE_SIZE), global_model_fn=lambda: snt.Linear(output_size=OUTPUT_GLOBAL_SIZE)) # + [markdown] colab_type="text" id="VsSIrvMEbV1W" # ## Feeding a `GraphsTuple` to a Graph Net # # A `GraphsTuple` can be fed into a graph network, which returns an output graph with the same number of nodes, edges, and edge connectivity, but with updated edge, node and global features. All of the output features are conditioned on the input features according to the graph structure, and are fully differentiable. 
# + colab={} colab_type="code" id="10g2R-aKz2kY" input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) output_graphs = graph_network(input_graphs) print("Output edges size: {}".format(output_graphs.edges.shape[-1])) # Equal to OUTPUT_EDGE_SIZE print("Output nodes size: {}".format(output_graphs.nodes.shape[-1])) # Equal to OUTPUT_NODE_SIZE print("Output globals size: {}".format(output_graphs.globals.shape[-1])) # Equal to OUTPUT_GLOBAL_SIZE # + [markdown] colab_type="text" id="HGK5xGBob3lb" # ## Connecting a `GraphNetwork` recurrently # # A Graph Net module can be chained recurrently by matching the output feature sizes to the input feature sizes, and feeding the output back to the input multiple times ([arXiv paper](https://arxiv.org/abs/1806.01261), bottom of Fig. 6a). # + colab={} colab_type="code" id="e54GDFXKB2NO" tf.reset_default_graph() input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) graph_network = modules.GraphNetwork( edge_model_fn=lambda: snt.Linear(output_size=EDGE_SIZE), node_model_fn=lambda: snt.Linear(output_size=NODE_SIZE), global_model_fn=lambda: snt.Linear(output_size=GLOBAL_SIZE)) num_recurrent_passes = 3 previous_graphs = input_graphs for unused_pass in range(num_recurrent_passes): previous_graphs = graph_network(previous_graphs) output_graphs = previous_graphs # + [markdown] colab_type="text" id="kZzEBReqCimx" # Alternatively, we can process the input graph multiple times with a graph state that gets updated recurrently. 
# + colab={} colab_type="code" id="_rfSo5NJ1UwG" def zeros_graph(sample_graph, edge_size, node_size, global_size): zeros_graphs = sample_graph.replace(nodes=None, edges=None, globals=None) zeros_graphs = utils_tf.set_zero_edge_features(zeros_graphs, edge_size) zeros_graphs = utils_tf.set_zero_node_features(zeros_graphs, node_size) zeros_graphs = utils_tf.set_zero_global_features(zeros_graphs, global_size) return zeros_graphs tf.reset_default_graph() graph_network = modules.GraphNetwork( edge_model_fn=lambda: snt.Linear(output_size=OUTPUT_EDGE_SIZE), node_model_fn=lambda: snt.Linear(output_size=OUTPUT_NODE_SIZE), global_model_fn=lambda: snt.Linear(output_size=OUTPUT_GLOBAL_SIZE)) input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) initial_state = zeros_graph( input_graphs, OUTPUT_EDGE_SIZE, OUTPUT_NODE_SIZE, OUTPUT_GLOBAL_SIZE) num_recurrent_passes = 3 current_state = initial_state for unused_pass in range(num_recurrent_passes): input_and_state_graphs = utils_tf.concat( [input_graphs, current_state], axis=1) current_state = graph_network(input_and_state_graphs) output_graphs = current_state # + [markdown] colab_type="text" id="PNnLXPXTmZXW" # Similarly, recurrent modules with gating, such as an LSTM or GRU, can be applied on the edges, nodes, and globals of the state and input graphs separately. 
# + [markdown] colab_type="text" id="6m6aOhOtAz3T" # ## Other canonical Graph Net modules # # Other canonical modules discussed in Figure 4 of our [arXiv paper](https://arxiv.org/abs/1806.01261) are provided in `graph_nets.modules`: # - [`modules.GraphIndependent`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesgraphindependent) (updates the global, node, and edge features independently, without message-passing) # - [`modules.InteractionNetwork`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesinteractionnetwork) (an example of a "Message-passing neural network") # - [`modules.CommNet`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulescommnet) (another example of a "Message-passing neural network") # - [`modules.SelfAttention`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesselfattention) (an example of a "Non-local neural network") # - [`modules.RelationNetwork`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesrelationnetwork) # - [`modules.DeepSets`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-modulesdeepsets) # # See documentation for more details and corresponding references. # + [markdown] colab_type="text" id="qfhlV2qpah5O" # # Graph Net building blocks # # Custom graph net modules can be built using a few basic building blocks provided in `graph_nets.blocks`. # + [markdown] colab_type="text" id="Ta0YenqKaq7E" # ## Broadcast operations # # Broadcast operations allow to transfer information between different types of elements in the graph: # - [`blocks.broadcast_globals_to_nodes`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksbroadcast_globals_to_nodesgraph-namebroadcast_globals_to_nodes): Copy/broadcast global features across all nodes. 
# - [`blocks.broadcast_globals_to_edges`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksbroadcast_globals_to_edgesgraph-namebroadcast_globals_to_edges): Copy/broadcast global features across all edges. # - [`blocks.broadcast_sender_nodes_to_edges`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksbroadcast_sender_nodes_to_edgesgraph-namebroadcast_sender_nodes_to_edges): Copy/broadcast node information from each node, across all edges for which that node is a sender. # - [`blocks.broadcast_receiver_nodes_to_edges`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksbroadcast_receiver_nodes_to_edgesgraph-namebroadcast_receiver_nodes_to_edges): Copy/broadcast node information from each node, across all edges for which that node is a receiver. # + colab={} colab_type="code" id="sR6ckNvQbgbO" tf.reset_default_graph() graphs_tuple = utils_tf.data_dicts_to_graphs_tuple([data_dict_0]) updated_broadcast_globals_to_nodes = graphs_tuple.replace( nodes=blocks.broadcast_globals_to_nodes(graphs_tuple)) updated_broadcast_globals_to_edges = graphs_tuple.replace( edges=blocks.broadcast_globals_to_edges(graphs_tuple)) updated_broadcast_sender_nodes_to_edges = graphs_tuple.replace( edges=blocks.broadcast_sender_nodes_to_edges(graphs_tuple)) updated_broadcast_receiver_nodes_to_edges = graphs_tuple.replace( edges=blocks.broadcast_receiver_nodes_to_edges(graphs_tuple)) with tf.Session() as sess: output_graphs = sess.run([ graphs_tuple, updated_broadcast_globals_to_nodes, updated_broadcast_globals_to_edges, updated_broadcast_sender_nodes_to_edges, updated_broadcast_receiver_nodes_to_edges]) plot_compare_graphs(output_graphs, labels=[ "Input graph", "blocks.broadcast_globals_to_nodes", "blocks.broadcast_globals_to_edges", "blocks.broadcast_sender_nodes_to_edges", "blocks.broadcast_receiver_nodes_to_edges"]) # + [markdown] colab_type="text" id="ykPbYewdLdKM" # We can easily use broadcasters to, for example, 
set the value of each edge to be the sum of the first feature element of: the input edges, the sender nodes, the receiver nodes, and the global feature. # # + colab={} colab_type="code" id="w3eWOCl18Q5R" tf.reset_default_graph() graphs_tuple = utils_tf.data_dicts_to_graphs_tuple([data_dict_0]) updated_graphs_tuple = graphs_tuple.replace( edges=(graphs_tuple.edges[:, :1] + blocks.broadcast_receiver_nodes_to_edges(graphs_tuple)[:, :1] + blocks.broadcast_sender_nodes_to_edges(graphs_tuple)[:, :1] + blocks.broadcast_globals_to_edges(graphs_tuple)[:, :1])) with tf.Session() as sess: output_graphs = sess.run([ graphs_tuple, updated_graphs_tuple]) plot_compare_graphs(output_graphs, labels=[ "Input graph", "Updated graph"]) # + [markdown] colab_type="text" id="uE8bBiLbaoCX" # ## Aggregators # # Aggregators perform reduce operations between different elements of the graph: # # - [`blocks.EdgesToGlobalsAggregator`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksedgestoglobalsaggregator): Aggregates the sets of features for all edges into a single global set of features. # - [`blocks.NodesToGlobalsAggregator`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksnodestoglobalsaggregator): Aggregates the sets of features for all nodes into a single global set of features. # - [`blocks.SentEdgesToNodesAggregator`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blockssentedgestonodesaggregator): Aggregates the sets of features for all edges sent by each node into a single set of features for that node. # - [`blocks.ReceivedEdgesToNodesAggregator`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksreceivededgestonodesaggregator): Aggregates the sets of features for all edges received by each node into a single set of features for that. 
# # Different types of reduce operations are:
# - [`tf.unsorted_segment_sum`](https://www.tensorflow.org/api_docs/python/tf/unsorted_segment_sum): Elementwise sum. Set to 0 for empty sets.
# - [`tf.unsorted_segment_mean`](https://www.tensorflow.org/api_docs/python/tf/unsorted_segment_mean): Elementwise mean. Set to 0 for empty sets.
# - [`tf.unsorted_segment_prod`](https://www.tensorflow.org/api_docs/python/tf/unsorted_segment_prod): Elementwise prod. Set to 1 for empty sets.
# - [`blocks.unsorted_segment_max_or_zero`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksunsorted_segment_max_or_zerovalues-indices-num_groups-nameunsorted_segment_max_or_zero): Elementwise max. Set to 0 for empty sets.
# - [`blocks.unsorted_segment_min_or_zero`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#blocksunsorted_segment_max_or_zerovalues-indices-num_groups-nameunsorted_segment_min_or_zero): Elementwise min. Set to 0 for empty sets.

# + colab={} colab_type="code" id="qR_3QHNgiMYv"
tf.reset_default_graph()

# Build a GraphsTuple holding the single example graph.
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple([data_dict_0])

# All four aggregators below share the same reducer (elementwise sum;
# empty sets reduce to 0).
reducer = tf.unsorted_segment_sum

# Aggregate all edge features into the global feature.
updated_edges_to_globals = graphs_tuple.replace(
    globals=blocks.EdgesToGlobalsAggregator(reducer=reducer)(graphs_tuple))
# Aggregate all node features into the global feature.
updated_nodes_to_globals = graphs_tuple.replace(
    globals=blocks.NodesToGlobalsAggregator(reducer=reducer)(graphs_tuple))
# For each node, aggregate the features of the edges it sends.
updated_sent_edges_to_nodes = graphs_tuple.replace(
    nodes=blocks.SentEdgesToNodesAggregator(reducer=reducer)(graphs_tuple))
# For each node, aggregate the features of the edges it receives.
updated_received_edges_to_nodes = graphs_tuple.replace(
    nodes=blocks.ReceivedEdgesToNodesAggregator(reducer=reducer)(graphs_tuple))

with tf.Session() as sess:
    output_graphs = sess.run([
        graphs_tuple,
        updated_edges_to_globals,
        updated_nodes_to_globals,
        updated_sent_edges_to_nodes,
        updated_received_edges_to_nodes])

plot_compare_graphs(output_graphs, labels=[
    "Input graph",
    "blocks.EdgesToGlobalsAggregator",
    "blocks.NodesToGlobalsAggregator",
    "blocks.SentEdgesToNodesAggregator",
    "blocks.ReceivedEdgesToNodesAggregator"])

# + [markdown] colab_type="text" id="lE0e3bMna96z"
# ## [`blocks.EdgeBlock`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksedgeblock)
#
# An EdgeBlock consists of applying a function to the concatenation of:
# - `graphs_tuple.edges`
# - `blocks.broadcast_sender_nodes_to_edges(graphs_tuple)`
# - `blocks.broadcast_receiver_nodes_to_edges(graphs_tuple)`
# - `blocks.broadcast_globals_to_edges(graphs_tuple)`
#
# The result is a graph with new edge features conditioned on input edges, nodes and global features according to the graph structure.

# + colab={} colab_type="code" id="b549fO5bnxNy"
tf.reset_default_graph()

# Each edge's new feature vector is a linear function (output size 10) of the
# concatenated edge / sender-node / receiver-node / global features.
edge_block = blocks.EdgeBlock(
    edge_model_fn=lambda: snt.Linear(output_size=10))
input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts)
output_graphs = edge_block(input_graphs)

print(("Output edges size: {}".format(output_graphs.edges.shape[-1])))

# + [markdown] colab_type="text" id="FsqdEWjLa0kF"
# ## [`blocks.NodeBlock`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksnodeblock)
#
# A NodeBlock consists of applying a function to the concatenation of:
# - `graphs_tuple.nodes`
# - `blocks.ReceivedEdgesToNodesAggregator(<reducer-function>)(graphs_tuple)`
# - `blocks.broadcast_globals_to_nodes(graphs_tuple)`
#
# The result is a graph with new node features conditioned on input edges, nodes and global features according to the graph structure.
# + colab={} colab_type="code" id="LvsrYPqgpZ5x" tf.reset_default_graph() node_block = blocks.NodeBlock( node_model_fn=lambda: snt.Linear(output_size=15)) input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) output_graphs = node_block(input_graphs) print(("Output nodes size: {}".format(output_graphs.nodes.shape[-1]))) # + [markdown] colab_type="text" id="JlNdE_rbbBOj" # ## [`blocks.GlobalBlock`](https://github.com/deepmind/graph_nets/blob/master/docs/graph_nets.md#class-blocksglobalblock) # # An GlobalBlock consists of applying a function to the concatenation of: # # - `graphs_tuple.globals` # - `blocks.EdgesToGlobalsAggregator(<reducer-function>)(graphs_tuple)` # - `blocks.NodesToGlobalsAggregator(<reducer-function>)(graphs_tuple)` # # The result is a graph with new globals features conditioned on input edges, nodes and global features. # + colab={} colab_type="code" id="sVuZ4EUzph1a" tf.reset_default_graph() global_block = blocks.GlobalBlock( global_model_fn=lambda: snt.Linear(output_size=20)) input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) output_graphs = global_block(input_graphs) print(("Output globals size: {}".format(output_graphs.globals.shape[-1]))) # + [markdown] colab_type="text" id="ZC_Y6dEZqNCx" # ## Block compositionality # # A `modules.GraphNetwork` is composed internally of a `modules.EdgeBlock`, a `modules.NodeBlock`, and a `modules.GlobalBlock`, connected serially. This allows every field in the output to be conditioned by any field in the input. 
# + colab={} colab_type="code" id="eN4Nz9bspqaQ" tf.reset_default_graph() graph_network = modules.GraphNetwork( edge_model_fn=lambda: snt.Linear(output_size=10), node_model_fn=lambda: snt.Linear(output_size=15), global_model_fn=lambda: snt.Linear(output_size=20)) input_graphs = utils_tf.data_dicts_to_graphs_tuple(graph_dicts) output_graphs = graph_network(input_graphs) for var in graph_network.variables: print(var) # + [markdown] colab_type="text" id="vGKTIwF6qtfR" # Most of the existing neural networks operating on graphs can be built upon this set of building blocks using their different configuration options. See `graph_nets.modules` for some examples.
graph_nets/demos/graph_nets_basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Python Day 3

# ## Recap

masses = [3.54, 2.07, 9.22, 1.86, 1.171]

print(masses)
print(masses[0])
print(masses[2:3])
print(masses[-1])

for mass in masses:
    if mass > 9.0:
        print(mass, "is HUGE")
    elif mass > 3.0:
        print(mass, "is large")
    else:
        print(mass, "is small")

import pandas as pd

data_all = pd.read_csv("data/gapminder_all.csv", index_col="country")
data_all.head(5)

data_all.plot(kind="scatter", x="gdpPercap_2007", y="lifeExp_2007", s=data_all["pop_2007"]/1000000)

# # Looping Over Data Sets

for filename in ["data/gapminder_gdp_africa.csv", "data/gapminder_gdp_americas.csv"]:
    data = pd.read_csv(filename, index_col="country")
    print(filename)
    print(data.min())

import glob

# `*` match zero or more characters
#
# `?` match exactly one character

glob.glob("data/*.csv")
glob.glob("data/*.pdb")
glob.glob("data/*as*.csv")
glob.glob("data/gapminder_?ll.csv")

for filename in glob.glob("data/gapminder_gdp*.csv"):
    data = pd.read_csv(filename)
    print(filename, data["gdpPercap_1952"].min())

data.shape[0]

import glob
import pandas as pd

# Find the smallest number of records across all CSV files in data/.
fewest = float("Inf")
for filename in glob.glob('data/*.csv'):
    dataframe = pd.read_csv(filename)
    fewest = min(fewest, dataframe.shape[0])
print('smallest file has', fewest, 'records')

# ## Detour: File Paths

# +
from pathlib import Path

p = Path("data/gapminder_gdp_africa.csv")
print(p.parent)
print(p.stem)
print(p.suffix)
# -

print(p.is_dir())

# # Break until 10:05

# # Writing Functions

def print_greeting():
    """Print a fixed greeting (a function with no parameters)."""
    print("Hello World!")

print_greeting()

def print_date(year, month, day):
    """Print a date in year/month/day form."""
    joined = str(year) + "/" + str(month) + "/" + str(day)
    print(joined)

print_date(2021, 9, 23)
print_date(month=9, day=23, year=2021)
print_date(2021, day=23, month=9)
# FIX: the lesson's deliberate error example is kept as a comment, because a
# positional argument after a keyword argument is a *SyntaxError* and would
# prevent this whole file from even being parsed:
# print_date(day=23, 2021, month=9)  # SyntaxError: positional argument follows keyword argument

# +
def report(pressure):
    """Print the given pressure (returns None, like print itself)."""
    print("pressure is:", pressure)

# This prints the function object itself -- note the missing parentheses.
print("calling", report, 22.5)
# -

print("calling", report(22.5))

result = report(22.5)
print(result)  # report() returns None

print("calling")
report(22.5)

def average(values):
    """Return the arithmetic mean of values, or None for an empty list."""
    if len(values) == 0:
        return None
    return sum(values)/len(values)

a = average([1, 2, 3])
print("average", a)

print("empty average", average([]))

# FIX: calling average() with no argument is the lesson's TypeError demo;
# commented out so the script can run end to end:
# average()  # TypeError: average() missing 1 required positional argument: 'values'

# # Exercise until 10:40
# https://swcarpentry.github.io/python-novice-gapminder/16-writing-functions/index.html#encapsulation-of-an-ifprint-block
#
# **Put up a checkmark when you're done :)**

import random

for i in range(10):
    # simulating the mass of a chicken egg
    # the (random) mass will be 70 +/- 20 grams
    mass = 70 + 20.0 * (2.0 * random.random() - 1.0)
    print(mass)
    # egg sizing machinery prints a label
    if mass >= 85:
        print("jumbo")
    elif mass >= 70:
        print("large")
    elif mass < 70 and mass >= 55:
        print("medium")
    else:
        print("small")

def get_egg_label(mass):
    """Return the size label for an egg of the given mass in grams."""
    if mass >= 85:
        return "jumbo"
    elif mass >= 70:
        return "large"
    elif mass < 70 and mass >= 55:
        return "medium"
    else:
        return "small"

# revised version
import random

for i in range(10):
    # simulating the mass of a chicken egg
    # the (random) mass will be 70 +/- 20 grams
    hello = 70 + 20.0 * (2.0 * random.random() - 1.0)
    print(hello, get_egg_label(hello))

# # Break until 11:00

# # Variable Scope

# +
pressure = 103.9  # global variable, visible everywhere

def adjust(t):
    """Scale t by 1.43 / pressure (reads the global `pressure`)."""
    temperature = t * 1.43 / pressure  # temperature -> local variable, only visible inside function
    return temperature
# -

print("adjusted", adjust(0.9))

# FIX: `temperature` is local to adjust(), so reading it here is the lesson's
# NameError demo; commented out so the script can run end to end:
# print("temperature after adjust", temperature)  # NameError: name 'temperature' is not defined

a = adjust(0.9)
print(a)

limit = 100

def clip(value):
    """Clamp value into the range [0.0, 100]."""
    # if value is negative: return 0.0
    # if value is greater than 100: return 100
    return min(max(0.0, value), 100)

inputValue = -22.5
print(clip(inputValue))
print(inputValue)

limit = 100

def clip(value):
    """Clamp value into [0.0, 100], printing the intermediate steps."""
    # if value is negative: return 0.0
    # if value is greater than 100: return 100
    temp1 = max(0.0, value)
    print("temp 1", temp1)
    temp2 = min(temp1, 100)
    print("temp2", temp2)
    # FIX: the original had a bare `return` with `temp2` on the following
    # line, which returned None and left temp2 as a dead expression.
    return temp2

print("output", clip(inputValue))
print(clip(120))

# # Programming Style

# **Code is read much more than it is written!**

def calc_bulk_density(mass, volume):
    """Returns dry bulk density = powder mass/powder volume.
    volume should be greater than 0!
    """
    if volume < 0:
        return None
    return mass/volume

help(calc_bulk_density)

def calc_bulk_density(mass, volume):
    """Returns dry bulk density = powder mass/powder volume.
    volume should be greater than 0!
    """
    assert volume > 0
    return mass/volume

calc_bulk_density(5, 10)
# FIX: a non-positive volume trips the assert above (the lesson's demo);
# commented out so the script can run end to end:
# calc_bulk_density(5, -10)  # AssertionError

# # Exercise until 11:51
# https://swcarpentry.github.io/python-novice-gapminder/18-style/index.html#clean-up-this-code
#
# **If you are looking for a partner to discuss, stop by breakout room 1**

# +
def string_machine(input_string, iterations):
    """
    Takes input_string and generates a new string with -'s and *'s
    corresponding to characters that have identical adjacent characters
    or not, respectively. Iterates through this procedure with the resultant
    strings for the supplied number of iterations.
    """
    print(input_string)
    input_string_length = len(input_string)
    old = input_string
    for i in range(iterations):
        new = ''
        # iterate through characters in previous string
        for j in range(input_string_length):
            left = j-1
            right = (j+1) % input_string_length  # ensure right index wraps around
            if old[left] == old[right]:
                new += '-'
            else:
                new += '*'
        print(new)
        # store new string as old
        old = new

string_machine('et cetera', 10)
# -

a = "hi"
help(a.join)

'.'.join(['ab', 'pq', 'rs'])

import this

many_values = list(range(500))
print(many_values)

import math

results = []
for value in many_values:
    results.append(math.cos(value))
print(results)

result = [math.cos(value) for value in many_values]  # list comprehension
print(result)
files/Python_day3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Automatic optimization with the PyTorch JIT # ## a worked example # *by <NAME> <<EMAIL>>* # # Today, I would like to discuss in detail some aspects of optimizing code in models, and in particular how you can let the PyTorch JIT optimize things for you. # # We will use the *Intersection over Union* loss commonly used in training detection models as an example and explore various ways to implement it in PyTorch. # # The intersection over union (or IoU) loss arises in training detection networks. # Given two axis-parallel rectangles (blue and red), we wish to compute the quotient between the are in the intersection (which is a rectangle again) and the union. In colors: # # ![iou.svg](iou.svg) # # As the intersection is always contained in the union, we know that $0 \leq IoU \leq 1$ (with the optimum being $1$, so strictly speaking $-IoU$ would be a loss). # # Note that if we have the area of the intersection and of the two rectangles, we can also express the area of the union as the sum areas of the two rectangles minus the area of the intersection (which is contained twice in the sum). # # Let $(x_1, y_1, w_1, h_1$) be the coordinates top left and the width and the height of the first rectangle and $(x_2, y_2, w_2, h_2$) those of the second. # # The intersection is easily calculated: If we have the top left and bottom right coordinates (and our coordinate system has increasing $y$ from top to bottom), we can take the maximum of the top left coordinates and the minimum of the bottom right coordinates. # So we have[^1] # $$ # x_I = \max(x_1, x_2), \qquad y_I = \max(y_1, y_2) # $$ # and - we need to calculate the bottom right corners, take the minimum and transform back to width and hight - # $$ # w_I = \min(x_1 + w_1, x_2 + w_2)-x_I. 
# $$ # But there is a slight complication when the rectangles don't intersect: then our formulae do not work but instead give us the rectangle "between" the two but with the corner points exchanged. This means that then $w_i$ calculated as above is actually negative, so we can fix this by enforcing a minimum of $0$ # $$ # w_I = \max \left( \min(x_1 + w_1, x_2 + w_2)-x_I,0\right), \qquad h_I = \max \left( \min(y_1 + h_1, y_2 + h_2)-y_I,0\right). # $$ # Note that these last maxmimizations with a constant would be performed in PyTorch using the `torch.clamp` function, while the (elementwise) maximum and minimum between two tensors is computed using `torch.min` and `torch.max`. # # Speaking of PyTorch, enough of the theory, let's move to practical things! # # # [^1]: I use $I$ here to mean *Intersection*, it's not an index. import torch import torch.utils.cpp_extension # The formulas above readily translate into a PyTorch function. Just to be safe, we clamp the the denominator to be at least $10^{-5}$. def ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2, eps=1e-5): xi = torch.max(x1, x2) # Intersection yi = torch.max(y1, y2) wi = torch.clamp(torch.min(x1+w1, x2+w2) - xi, min=0) hi = torch.clamp(torch.min(y1+h1, y2+h2) - yi, min=0) area_i = wi * hi # Area Intersection area_u = w1 * h1 + w2 * h2 - wi * hi # Area Union return area_i / torch.clamp(area_u, min=eps) # The function will is vector-ready just by passing in a multi-dimensional tensor. # Let's try it out with some dummy data: x1, y1, w1, h1, x2, y2, w2, h2 = torch.randn(8, 100, 1000, device='cuda').exp() ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2) # Without looking too much at the results, it seems to work. # # Let us take a short digression here. As you may know, PyTorch provides functional interfaces # in `torch.nn.functional` (often also known as `F`) as well as modules (in `torch.nn`, commonly imported as `nn`)[^2]. It does so for typical neural network components as well as the loss functions. 
We might wonder which is preferable for our own modelling. It is, in the end, a question of style, but I would suggest the following as a good rule of thumb: If it has (significant) parameters or even state, use the module interface (so subclass `nn.Module`). If it has not, define a function as the above. I also do this when using PyTorch's functions - e.g. I usually spell out my forward and prefer to use the function `F.relu` over the module `nn.Relu`. # # [^2]: I might say that I usually just type out the modules instead for importing them under short names. # # But enough of the digression. Can our ratio_iou calculation be made more efficient? # # One common thought when trying to make Python things more efficient is moving to C++. Fortunately PyTorch makes it very straightforward to do so, by the way of C++ extensions or custom operators. Both work the same except for the actual bindings. The difference between them is that functions in PyTorch extensions can take any parameters (by using the library PyBind11) while custom operators are restricted to the types that PyTorch supports (e.g. Tensors, `int64_t`, `double`, `std::string`, `IntList` and `TensorList`). The advantage of custom operators is that they can be used with the JIT and in C++, too. # # Happily, we can just type our C-Code into a cell and have PyTorch compile it for us. 
Let's do a custom operator that follows exactly the Python function above: # # + csrc = """ #include <torch/script.h> using namespace torch; Tensor iou_native(const Tensor& x1, const Tensor& y1, const Tensor& w1, const Tensor& h1, const Tensor& x2, const Tensor& y2, const Tensor& w2, const Tensor& h2) { auto xi = torch::max(x1, x2); auto yi = torch::max(y1, y2); auto wi = torch::clamp(torch::min(x1+w1, x2+w2) - xi, 0); auto hi = torch::clamp(torch::min(y1+h1, y2+h2) - yi, 0); auto area_i = wi * hi; auto area_u = w1 * h1 + w2 * h2 - wi * hi; return area_i / torch::clamp(area_u, 1e-5); } static auto registry = torch::jit::RegisterOperators("super_iou::iou_native", &iou_native); """ torch.utils.cpp_extension.load_inline("libsuperiou", csrc, is_python_module=False, verbose=True) # - # That was easy enough! Now we have a custom op unter `torch.ops`, the name it is available under is determined by the string argument to RegisterOperators - here `torch.ops.super_iou.iou_native`. (Note: If you get an error about "multiple overloads", you'll have to reload your kernel and start again... While PyTorch extensions support re-building and re-loading, custom operators run into trouble with that.) Let's see if it gives the same result as the Python version: (ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2)==torch.ops.super_iou.iou_native(x1, y1, w1, h1, x2, y2, w2, h2)).all().item() # It works. Note that in general it is safer to use `torch.almost_equal` or print `(a-b).abs().max()` to deal with numerical precision. But here, `==` works well, too. # # So how about timings? Note that we need to call `torch.cuda.synchronize()` to get valid timings on the GPU. # + def taketime(fn): _ = fn(x1, y1, w1, h1, x2, y2, w2, h2) torch.cuda.synchronize() torch.cuda.synchronize() # %timeit taketime(ratio_iou) # %timeit taketime(torch.ops.super_iou.iou_native) # - # We see that there is difference of about 5% n cuda, if we did this with CPU tensors, there would be no significant difference. 
Depending on the nature of the calculation, this is a typical result. For the `lltm` model in the PyTorch C++-Extension tutorial, you get a speedup of about 10% by moving to C++. But this involves a loop over the input sequence, so calls quite a few tensor operation. Here we only have a handful of operations, so moving to C++ offers little performance gain by itself. # # What is relatively slow about our code is that each operation stores intermediate results in tensors and the next operation reads those to continue. If we write our own kernel, that can be helped. I consider this the "classic way" of optimizing models. # The `TensorAccessor` (for CPU) / `PackedTensorAccessor` (for transferring sizes and strides to GPU) classes provide a convenient interface for element access. As you would in production, we multiplex the floating types through templates in `scalar_t`. # For simplicity, we only deal with 1-d tensors (this is the second argument to anything `accessor`). # + csrc = """ #include <torch/script.h> #include <ATen/Parallel.h> using namespace torch; // The cuda kernel is easy enough template<typename scalar_t> __global__ void iou_kernel_gpu(PackedTensorAccessor<scalar_t, 1> result, PackedTensorAccessor<scalar_t, 1> x1, PackedTensorAccessor<scalar_t, 1> y1, PackedTensorAccessor<scalar_t, 1> w1, PackedTensorAccessor<scalar_t, 1> h1, PackedTensorAccessor<scalar_t, 1> x2, PackedTensorAccessor<scalar_t, 1> y2, PackedTensorAccessor<scalar_t, 1> w2, PackedTensorAccessor<scalar_t, 1> h2 ) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= x1.size(0)) // we might have more threads than work to do in the last block return; // This should look very familiar. We could try reading each element only once, but let's keep it simple. 
scalar_t xi = max(x1[i], x2[i]); scalar_t yi = max(y1[i], y2[i]); scalar_t wi = max(min(x1[i]+w1[i], x2[i]+w2[i]) - xi, static_cast<scalar_t>(0)); scalar_t hi = max(min(y1[i]+h1[i], y2[i]+h2[i]) - yi, static_cast<scalar_t>(0)); scalar_t area_i = wi * hi; scalar_t area_u = w1[i] * h1[i] + w2[i] * h2[i] - area_i; result[i] = area_i / max(area_u, static_cast<scalar_t>(0.00001f)); } // The CPU kernel is looks similar, we could also just put it in the main function... template<typename scalar_t> void iou_kernel_cpu(TensorAccessor<scalar_t, 1> result, TensorAccessor<scalar_t, 1> x1, TensorAccessor<scalar_t, 1> y1, TensorAccessor<scalar_t, 1> w1, TensorAccessor<scalar_t, 1> h1, TensorAccessor<scalar_t, 1> x2, TensorAccessor<scalar_t, 1> y2, TensorAccessor<scalar_t, 1> w2, TensorAccessor<scalar_t, 1> h2) { // we use CPU parallelization constexpr int64_t GRAIN_SIZE = 8192; // minimum grain size for parallel execution at::parallel_for(0, x1.size(0), GRAIN_SIZE, [&](int64_t i_begin, int64_t i_end) { for (int64_t i = i_begin; i < i_end; ++i) { scalar_t xi = max(x1[i], x2[i]); scalar_t yi = max(y1[i], y2[i]); scalar_t wi = max(min(x1[i]+w1[i], x2[i]+w2[i]) - xi, static_cast<scalar_t>(0)); scalar_t hi = max(min(y1[i]+h1[i], y2[i]+h2[i]) - yi, static_cast<scalar_t>(0)); scalar_t area_i = wi * hi; scalar_t area_u = w1[i] * h1[i] + w2[i] * h2[i] - area_i; result[i] = area_i / max(area_u, static_cast<scalar_t>(0.00001f)); } }); } torch::Tensor iou_forward(const Tensor& x1, const Tensor& y1, const Tensor& w1, const Tensor& h1, const Tensor& x2, const Tensor& y2, const Tensor& w2, const Tensor& h2) { auto res = torch::empty_like(x1); for (auto& t : {x1, y1, w1, h1, x2, y2, w2, h2}) { AT_ASSERTM(t.dim()==1 && t.size(0)==x1.size(0) && t.device()==x1.device() && t.dtype()==x1.dtype(), "tensors are not of same shape and kind"); } if (x1.is_cuda()) { dim3 block(512); dim3 grid((x1.size(0)+511)/512); AT_DISPATCH_FLOATING_TYPES(x1.type(), "iou", [&] { 
iou_kernel_gpu<scalar_t><<<grid,block>>>(res.packed_accessor<scalar_t, 1>(), x1.packed_accessor<scalar_t, 1>(), y1.packed_accessor<scalar_t, 1>(), w1.packed_accessor<scalar_t, 1>(), h1.packed_accessor<scalar_t, 1>(), x2.packed_accessor<scalar_t, 1>(), y2.packed_accessor<scalar_t, 1>(), w2.packed_accessor<scalar_t, 1>(), h2.packed_accessor<scalar_t, 1>()); }); } else { AT_DISPATCH_FLOATING_TYPES(x1.type(), "iou", [&] { iou_kernel_cpu<scalar_t>(res.accessor<scalar_t, 1>(), x1.accessor<scalar_t, 1>(), y1.accessor<scalar_t, 1>(), w1.accessor<scalar_t, 1>(), h1.accessor<scalar_t, 1>(), x2.accessor<scalar_t, 1>(), y2.accessor<scalar_t, 1>(), w2.accessor<scalar_t, 1>(), h2.accessor<scalar_t, 1>()); }); } return res; } torch::Tensor iou_native(const Tensor& x1, const Tensor& y1, const Tensor& w1, const Tensor& h1, const Tensor& x2, const Tensor& y2, const Tensor& w2, const Tensor& h2) { auto xi = torch::max(x1, x2); auto yi = torch::max(y1, y2); auto wi = torch::clamp(torch::min(x1+w1, x2+w2) - xi, 0); auto hi = torch::clamp(torch::min(y1+h1, y2+h2) - yi, 0); auto area_i = wi * hi; auto area_u = w1 * h1 + w2 * h2 - wi * hi; return area_i / torch::clamp(area_u, 1e-5); } static auto registry = torch::jit::RegisterOperators("super_iou2::iou_forward", &iou_forward) .op("super_iou2::iou_native", &iou_native); ; """ torch.utils.cpp_extension.load_inline("iou_op", "", csrc, is_python_module=False, verbose=True) # - # Phew. That was a bit tedious, but let's see if it works! 
# + x1, y1, w1, h1, x2, y2, w2, h2 = [t.view(-1) for t in [x1, y1, w1, h1, x2, y2, w2, h2]] print ("check gpu", (ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2)==torch.ops.super_iou.iou_native(x1, y1, w1, h1, x2, y2, w2, h2)).all().item()) print ("check cpu", (torch.ops.super_iou2.iou_forward(x1.cpu(), y1.cpu(), w1.cpu(), h1.cpu(), x2.cpu(), y2.cpu(), w2.cpu(), h2.cpu()) == torch.ops.super_iou2.iou_forward(x1.cpu(), y1.cpu(), w1.cpu(), h1.cpu(), x2.cpu(), y2.cpu(), w2.cpu(), h2.cpu())).all().item()) # - # So it seems to work, let's time things again. torch.cuda.synchronize() # %timeit taketime(torch.ops.super_iou2.iou_forward) # %timeit taketime(ratio_iou) # Now that is a lot faster! # # However, it is not usable as is: We do not have a backward. So we need two more kernels? # Can we get something that is fast and doesn't need us to write all the infrastructure? # # It turns out we can. The PyTorch JIT has two awesome components, the *fuser* and the *autodiff* that will automatically create kernels for us. (There is a limitation, here, we need to specify the `max` argument to clamp in order for this here to work.) import math @torch.jit.script def ratio_iou_scripted(x1, y1, w1, h1, x2, y2, w2, h2): xi = torch.max(x1, x2) # Intersection (yi similarly) yi = torch.max(y1, y2) # Intersection (yi similarly) wi = torch.clamp(torch.min(x1+w1, x2+w2) - xi, min=0, max=math.inf) hi = torch.clamp(torch.min(y1+h1, y2+h2) - yi, min=0, max=math.inf) area_i = wi * hi # Area Intersection area_u = w1 * h1 + w2 * h2 - wi * hi # Area Union return area_i / torch.clamp(area_u, min=1e-5, max=math.inf) print("check", (ratio_iou_scripted(x1, y1, w1, h1, x2, y2, w2, h2)-ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2)).abs().max().item()) # Let's time it again: torch.cuda.synchronize() # %timeit taketime(torch.ops.super_iou2.iou_forward) # %timeit taketime(ratio_iou_scripted) # %timeit taketime(ratio_iou) # Not bad! We got a more than 6x speedup just by putting @torch.jit.script above our function. 
# While apparent factor of two off the hand-crafted kernel still isn't ideal, part of that is that the size of the tensors isn't that large. Going to 10 Million elements, we are only 25% slower than the handwritten kernel: x1, y1, w1, h1, x2, y2, w2, h2 = torch.randn(8, 10_000_000, device='cuda').exp() torch.cuda.synchronize() # %timeit taketime(torch.ops.super_iou2.iou_forward) # %timeit taketime(ratio_iou_scripted) # How did that work? We can look at the graph the JIT has built for our calculation: You see that the main graph defers to a `FusionGroup`. The fusion group represents the graph that will be compiled into our custom kernel. (Note: I assume here that you run this with parameters *not* requiring gradients, we'll repeat the same with gradients below.) ratio_iou_scripted.graph_for(x1, y1, w1, h1, x2, y2, w2, h2) # Note that even if things are shown in a fusion group, it can sometimes happen that the fuser decides it cannot create a kernel. # You can observe kernel creation by setting the environment variable `PYTORCH_FUSION_DEBUG=1` (works best on the console, the source code is written to the terminal). # # But we really wanted to get forward and backward, so let's do that. # # Here is a bit of digression again, but I'll keep it very short: Note that I use `requires_grad_()` below instead of a `requires_grad=True` argument in the `randn`. This is because now `x1` and friends are leaf variables to the autograd graph, otherwise the random tensor (not assigned a Python variable) would be the leaf variables and accumulate the grads! This is something that you can easily fool yourself with (I can't say it never happened to me before and it's a not-so-infrequent cause for people asking on the forums, too). I prefer `.requires_grad_()` over setting the attribute `.requires_grad = True` because the first is not only shorter, but also will fail if I misspell it for any reason. 
# # But so here is timing this with backward (I always evaluate the scripted function to not have the one-off compilation time in the timing): # + x1, y1, w1, h1, x2, y2, w2, h2 = [t.requires_grad_() for t in torch.randn(8, 100_000, device='cuda').exp()] l1 = ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2) l2 = ratio_iou_scripted(x1, y1, w1, h1, x2, y2, w2, h2) grad_out = torch.randn_like(l1) grads1 = torch.autograd.grad(l1, [x1, y1, w1, h1, x2, y2, w2, h2], grad_out) grads2 = torch.autograd.grad(l2, [x1, y1, w1, h1, x2, y2, w2, h2], grad_out) print ("check:", (l1-l2).abs().max().item(), max([(g1-g2).abs().max().item() for g1, g2 in zip(grads1, grads2)])) def time_loss_and_backward(fn): l = fn(x1, y1, w1, h1, x2, y2, w2, h2) grads = torch.autograd.grad(l, [x1, y1, w1, h1, x2, y2, w2, h2], grad_out) torch.cuda.synchronize() torch.cuda.synchronize() # %timeit time_loss_and_backward(ratio_iou) # %timeit time_loss_and_backward(ratio_iou_scripted) # - # I get a 4.5x speedup. Not bad for just adding ` @torch.jit.script`! # # My measurements have been done on my [PR #14957](https://github.com/pytorch/pytorch/pull/14957) branch. The backward optimization has had a bit of a bumpy ride in PyTorch in November 2018, as a late fix for correct gradients of broadcasted tensors has inserted summations into the backward that cannot be fused. I hope that it will be fixed soon. # # Let's look at the graph again. You see that it now is wrapped in a `DifferentiableGraph`. This means that the JIT autodiff has identified a block that it knows how to differentiate. รŒnside, you have the `FusionGroup` we already saw and a bit of broadcasting. ratio_iou_scripted.graph_for(x1, y1, w1, h1, x2, y2, w2, h2) # Let's look at the backward graph, too. I extracted the code to get the backward graph from PyTorch's testsuite. I re-define the function in order for only a single backward being defined. It tries to extract the backward graph from the latest(?) 
# run forward, so it might be a bit fragile (rerun the definition of the ratio_iou_script and the timing with backward before backward_graph) if you run into trouble.
# Note that in the output here, the bulk of the calculation (except a few `GradSumToSize`) is done in a large fusion group again. On 1.0 this would have been split into piecemeal fusiongroups with `SumToSize` in between.

def backward_graph(script_module):
    """Return a copy of the backward graph recorded for `script_module`.

    Digs through the JIT debug state: the last execution plan of the
    function's graph executor holds the gradient executors, whose own last
    execution plan carries the backward graph (assumed here to belong to the
    most recent differentiable run -- see the caveat in the text above).
    """
    # magic debugging stuff I learned about in the PyTorch JIT test suite
    graph_executor_state = script_module.get_debug_state()
    fwd_plan = list(graph_executor_state.execution_plans.values())[-1]
    grad_executor = list(fwd_plan.code.grad_executors())[-1]
    bwd_plan = list(grad_executor.get_debug_state().execution_plans.values())[-1]
    return bwd_plan.graph.copy()  # in order to own the graph, we need to make a copy

backward_graph(ratio_iou_scripted)

# That's all for now. I hope you enjoyed this little demo and appreciate your feedback and comments at <<EMAIL>>.
#
# On my blog https://lernapparat.de/ you'll find the slides from the talk that this demonstration accompanies.
misc/pytorch_automatic_optimization_jit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chrismarkella/Kaggle-access-from-Google-Colab/blob/master/generator_examples.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="rujikGxVSsjk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="22c8a73c-37ad-4e12-f1fd-22f9bfa1a861"
def up_to_5_gen():
    """Yield the integers 0 through 4, one at a time."""
    yield from range(5)

for value in up_to_5_gen():
    print(value, end=',')

# + id="DGNFPQ_ETLwa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="d6314bcc-4400-421b-9956-d72735faea91"
def up_to_N_gen(n):
    """Yield the integers 0 through n-1, one at a time."""
    yield from range(n)

for value in up_to_N_gen(4):
    print(value, end=',')

# + id="iTA2-62-Ts2A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4d327ee9-519e-4ead-b006-e0477dec6e93"
def infinite_gen():
    """Yield 1, 2, 3, ... without end."""
    current = 1
    while True:
        yield current
        current += 1

inf_gen = infinite_gen()
for i in range(6):
    print(next(inf_gen), end=',')

# + id="h9A31lyqUV4Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8d752808-3ce2-46d2-e2fa-0ec56495c60d"
def odds_gen():
    """Yield the odd numbers 1, 3, 5, ... without end."""
    current = 1
    while True:
        yield current
        current += 2

odds = odds_gen()
for _ in range(10):
    print(next(odds), end=',')

# + id="KPNd8jH2U2A3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3a157c5f-9b12-4d6f-ea82-a7f1116b0608"
def evens_gen():
    """Yield the even numbers 0, 2, 4, ... without end."""
    current = 0
    while True:
        yield current
        current += 2

evens = evens_gen()
for _ in range(12):
    print(next(evens), end=',')

# + id="yjN8FqmtVQy5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="52eb1d14-82fd-44e2-e873-763de8ab9a22"
def fibs_gen():
    """Yield the Fibonacci numbers 0, 1, 1, 2, 3, ... without end."""
    previous, current = 0, 1
    while True:
        yield previous
        previous, current = current, previous + current

fibs = fibs_gen()
for _ in range(100):
    print(next(fibs), end=', ')

# + id="RUEvuis7VtQa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="83650f0f-fdb6-4730-d69d-07aefe34bf6e"
def tri_fibs_gen():
    """Starts with 0,0,1 then the next one is the sum of the previous
    three in the sequence.
    """
    first, second, third = 0, 0, 1
    while True:
        yield first
        first, second, third = second, third, first + second + third

tri_fibs = tri_fibs_gen()
for _ in range(10):
    print(next(tri_fibs), end=', ')

# + id="_ZOAf4aBX0kM" colab_type="code" colab={}
generator_examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import random
import math
import time
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, mean_absolute_error
import datetime
import operator

plt.style.use('seaborn')

# Time-series tables: one row per region, one column per reporting day.
confirmed_cases = pd.read_csv('confirmed_global.csv')
deaths_reported = pd.read_csv('deaths_global.csv')
recovered_cases = pd.read_csv('recovered_global.csv')
print(confirmed_cases.columns)

# +
confirmed_cases.head()
deaths_reported.head()
recovered_cases.head()

cols = confirmed_cases.keys()
# The first four columns are region metadata; everything from column 4
# onwards is the per-day counts.
confirmed = confirmed_cases.loc[:, cols[4]:cols[-1]]
death = deaths_reported.loc[:, cols[4]:cols[-1]]
recoveries = recovered_cases.loc[:, cols[4]:cols[-1]]
# -

confirmed.head()

# +
# Worldwide daily totals across all regions.
dates = confirmed.keys()
world_cases = []        # FIX: was misspelled `world_caes` (and `worldcases` below)
total_deaths = []
mortality_rate = []
total_recovered = []

for i in dates:
    confirmed_sum = confirmed[i].sum()
    death_sum = death[i].sum()       # FIX: the frame is named `death`, not `deaths`
    recovered_sum = recoveries[i].sum()
    world_cases.append(confirmed_sum)
    total_deaths.append(death_sum)
    mortality_rate.append(death_sum/confirmed_sum)
    total_recovered.append(recovered_sum)
# -

days_since_1_22 = np.array([i for i in range(len(dates))]).reshape(-1, 1)
world_cases = np.array(world_cases).reshape(-1, 1)
total_deaths = np.array(total_deaths).reshape(-1, 1)
total_recovered = np.array(total_recovered).reshape(-1, 1)

days_in_future = 15
future_forcast = np.array([i for i in range(len(dates)+days_in_future)]).reshape(-1, 1)
# FIX: drop only the forecast-only tail so adjusted_dates lines up with the
# observed series used in the plots below (was `future_forcast[:10]`, which
# kept just 10 points and broke plt.plot length matching).
adjusted_dates = future_forcast[:-days_in_future]

# +
start = '1/22/2020'
# FIX: the datetime method is `strptime`, not `striptime`.
start_date = datetime.datetime.strptime(start, '%m/%d/%Y')
future_forcast_dates = []
for i in range(len(future_forcast)):
    # FIX: format the date, then append the string. The original called
    # `.strftime` on the return value of append() (which is None).
    future_forcast_dates.append((start_date + datetime.timedelta(days=i)).strftime('%m/%d/%y'))
# Per-country / per-province totals as of the most recent date column,
# then hyper-parameter search for an SVR forecaster.

# BUG FIX: `latest_confirmed` was used but never defined — it is the last
# (most recent) date column of the confirmed frame.
latest_confirmed = confirmed_cases[cols[-1]]

# BUG FIX: .uniqye() -> .unique()
unique_countries = list(confirmed_cases['Country/Region'].unique())
country_confirmed_cases = []
no_cases = []
for i in unique_countries:
    cases = latest_confirmed[confirmed_cases['Country/Region'] == i].sum()
    if cases > 0:
        country_confirmed_cases.append(cases)
    else:
        no_cases.append(i)
for i in no_cases:
    unique_countries.remove(i)
# Sort country names by case count, descending.
unique_countries = [k for k, v in sorted(zip(unique_countries, country_confirmed_cases),
                                         key=operator.itemgetter(1), reverse=True)]
for i in range(len(unique_countries)):
    # BUG FIX: `confirmed_caese` -> `confirmed_cases`.
    country_confirmed_cases[i] = latest_confirmed[confirmed_cases['Country/Region'] == unique_countries[i]].sum()

unique_provinces = list(confirmed_cases['Province/State'].unique())
# These countries report several rows whose Province/State equals the country
# name; drop them so they are not double counted as provinces.
outliers = ['United Kingdom', 'Denmark', 'France']
for i in outliers:
    unique_provinces.remove(i)

province_confirmed_cases = []
no_cases = []
# BUG FIX: loop/removal used the undefined name `unique_province` and the
# typo `caes` — normalized to `unique_provinces` / `cases`.
for i in unique_provinces:
    cases = latest_confirmed[confirmed_cases['Province/State'] == i].sum()
    if cases > 0:
        province_confirmed_cases.append(cases)
    else:
        no_cases.append(i)
for i in no_cases:
    unique_provinces.remove(i)

for i in range(len(unique_provinces)):
    print(f"{unique_provinces[i]}:{province_confirmed_cases[i]} cases")

# Rows with no Province/State value come through as NaN (a float) — drop them.
nan_indices = []
for i in range(len(unique_provinces)):
    if isinstance(unique_provinces[i], float):
        nan_indices.append(i)
unique_provinces = list(unique_provinces)
# BUG FIX: was assigned to a new name `provinces_confirmed_cases`, leaving the
# list the rest of the code reads untouched.
province_confirmed_cases = list(province_confirmed_cases)
# BUG FIX: popping in ascending order shifts every later index; pop descending.
for i in sorted(nan_indices, reverse=True):
    unique_provinces.pop(i)
    province_confirmed_cases.pop(i)
# -

plt.figure(figsize=(32, 32))
plt.barh(unique_countries, country_confirmed_cases)
plt.title('Number of Covid-19 Confirmed cases in countries')
plt.xlabel('Number of covid-19 Confirmed Cases')
plt.show()

# Randomized hyper-parameter search space for the SVR forecaster.
kernel = ['poly', 'sigmoid', 'rbf']
c = [0.01, 0.1, 1, 10]
gamma = [0.01, 0.1, 1]
epsilon = [0.01, 0.1, 1]
shrinking = [True, False]
svm_grid = {'kernel': kernel, 'C': c, 'gamma': gamma, 'epsilon': epsilon, 'shrinking': shrinking}

svm = SVR()
# BUG FIX: `RandomisedSearch` does not exist — the imported class is
# RandomizedSearchCV.
svm_search = RandomizedSearchCV(svm, svm_grid, scoring='neg_mean_squared_error',
                                cv=3, return_train_score=True, n_jobs=-1, n_iter=40, verbose=1)

# BUG FIX: the search was never fitted before reading its results. The split
# cell was missing from the notebook; reconstructed here holding out the most
# recent days (shuffle=False for a time series).
# NOTE(review): confirm test_size against the original notebook.
X_train_confirmed, X_test_confirmed, y_train_confirmed, y_test_confirmed = train_test_split(
    days_since_1_22, world_cases.ravel(), test_size=0.15, shuffle=False)
svm_search.fit(X_train_confirmed, y_train_confirmed)

# BUG FIX: the fitted attribute is best_params_ (trailing underscore).
print(svm_search.best_params_)
# Evaluate the tuned SVR and a linear-regression baseline, then plot the
# observed world series against each model's forward forecast.
svm_confirmed = svm_search.best_estimator_
# BUG FIX: the forecast index array is spelled `future_forcast` everywhere
# else in this notebook; `future_forecast` was a NameError.
svm_pred = svm_confirmed.predict(future_forcast)

# BUG FIX: the held-out arrays are X_test_confirmed / y_test_confirmed
# (capital X); `x_test_confirmed` and `y_test_pred` were undefined.
svm_test_pred = svm_confirmed.predict(X_test_confirmed)
plt.plot(svm_test_pred)
plt.plot(y_test_confirmed)
print('MAE:', mean_absolute_error(svm_test_pred, y_test_confirmed))
print('MSE:', mean_squared_error(svm_test_pred, y_test_confirmed))

# Observed world cases over time.
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, world_cases)
plt.title('Number of Coronavirus Cases Over Time', size=30)
plt.xlabel('Days Since 1/22/2020', size=30)
plt.ylabel('Number of Cases', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

# Observed series with the SVR forecast overlaid.
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, world_cases)
plt.plot(future_forcast, svm_pred, linestyle='dashed', color='purple')
plt.title('Number of Coronavirus Cases Over Time', size=30)
plt.xlabel('Days Since 1/22/2020', size=30)
plt.ylabel('Number of Cases', size=30)
plt.legend(['Confirmed Cases', 'SVM predictions'])
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

from sklearn.linear_model import LinearRegression

# BUG FIX: the `normalize` argument was deprecated and then removed from
# scikit-learn (>= 1.2); for an OLS fit with an intercept it does not change
# the fitted values, so dropping it preserves the predictions.
linear_model = LinearRegression(fit_intercept=True)
linear_model.fit(X_train_confirmed, y_train_confirmed)
test_linear_pred = linear_model.predict(X_test_confirmed)
linear_pred = linear_model.predict(future_forcast)
print('MAE:', mean_absolute_error(test_linear_pred, y_test_confirmed))
print('MSE:', mean_squared_error(test_linear_pred, y_test_confirmed))

plt.plot(y_test_confirmed)
plt.plot(test_linear_pred)

# Observed series with the linear-regression forecast overlaid.
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, world_cases)
plt.plot(future_forcast, linear_pred, linestyle='dashed', color='orange')
plt.title('Number of Coronavirus Cases Over Time', size=30)
plt.xlabel('Days Since 1/22/2020', size=30)
plt.ylabel('Number of Cases', size=30)
plt.legend(['Confirmed Cases', 'Linear Regression Predictions'])
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

# +
print('Linear regression future predictions:')
print(linear_pred[-10:])

plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, total_deaths, color='red')
# Finish the deaths-over-time figure started in the previous cell, then plot
# mortality rate, recoveries, deaths vs recoveries, and their phase plot.
plt.title('Number of Coronavirus Deaths Over Time', size=30)
plt.xlabel('Time', size=30)
plt.ylabel('Number of Deaths', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# -

# Daily mortality rate with its mean as a dashed reference line.
mean_mortality_rate = np.mean(mortality_rate)
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, mortality_rate, color='orange')
plt.axhline(y = mean_mortality_rate,linestyle='--', color='black')
plt.title('Mortality Rate of Coronavirus Over Time', size=30)
plt.legend(['mortality rate', 'y='+str(mean_mortality_rate)])
plt.xlabel('Time', size=30)
plt.ylabel('Mortality Rate', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

# Cumulative recoveries over time.
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, total_recovered, color='green')
plt.title('Number of Coronavirus Cases Recovered Over Time', size=30)
plt.xlabel('Time', size=30)
plt.ylabel('Number of Cases', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

# Deaths and recoveries on the same axes for comparison.
plt.figure(figsize=(20, 12))
plt.plot(adjusted_dates, total_deaths, color='r')
plt.plot(adjusted_dates, total_recovered, color='green')
plt.legend(['deaths', 'recoveries'], loc='best', fontsize=20)
plt.title('Number of Coronavirus Cases', size=30)
plt.xlabel('Time', size=30)
plt.ylabel('Number of Cases', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()

# Phase plot: deaths as a function of recoveries (time is implicit).
plt.figure(figsize=(20, 12))
plt.plot(total_recovered, total_deaths)
plt.title('Coronavirus Deaths vs Coronavirus Recoveries', size=30)
plt.xlabel('Total number of Coronavirus Recoveries', size=30)
plt.ylabel('Total number of Coronavirus Deaths', size=30)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
Linear Regression/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Atividade 2 # # * **Tipo**: Resultado prático digital # * **Pontuação**: 6,0 pontos. # # [Em breve o detalhamento da atividade estará disponível]
cclhm0069/_build/jupyter_execute/at_aval/at2.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .cs
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: .NET (C#)
//     language: C#
//     name: .net-csharp
// ---

// # Outputting HTML in a notebook

// ## Display Helpers
//
// There are a number of helper methods for writing HTML that are available by default in a .NET notebook.

// ### HTML

// If you want to write out a `string` as HTML, you can use the `HTML` method:

display(HTML("<b style=\"color:blue\">Hello!</b>"));

// Displaying HTML using a `string` directly will display the actual string rather than rendering it as HTML.

display("<b style=\"color:blue\">Hello!</b>");

// The `HTML` method signals that the content is HTML because its return type, `HtmlString`, implements `IHtmlContent`:

// +
var someHtml = HTML("<b style=\"color:blue\">Hello!</b>");

display(someHtml.GetType());
// -

// ### Javascript

// You may also want to output JavaScript. You can do this using the `Javascript` helper.

Javascript(@"alert(""Hello!"");");

// ## PocketView (C#)

// For more complex HTML, you can use the PocketView API. Let's start with an example:

// The `img[...]`/`a[...]` indexers set HTML attributes; the call syntax sets child content.
display(
    span(
        img[src:"https://en.wikipedia.org/favicon.ico", style:"height:1.5em"],
        a[href: @"https://en.wikipedia.org", target: "blank", style:"color:green"](b("Wikipedia"))
    )
);

// PocketView is an API for concisely writing HTML, in the terminology of HTML, using C# code. Just like the `HTML` method, it returns an object that implements `IHtmlContent`. You can see the actual HTML code by converting a `PocketView` to a string:

// +
var pocketView = span(
    img[src:"https://en.wikipedia.org/favicon.ico", style:"height:1.5em"],
    a[href: @"https://en.wikipedia.org", target: "blank", style:"color:green"](b("Wikipedia")));

display(pocketView.ToString());
// -

// The PocketView API provides a number of top-level properties in your notebook that can be used to create various HTML tags. Here's the list of tags that are supported by default:

// +
// Enumerate the tag-factory properties exposed by PocketViewTags via reflection.
var pocketViewTagMethods = typeof(PocketViewTags)
    .GetProperties()
    .Select(m => m.Name);

display(pocketViewTagMethods);
// -

// Each of these properties returns a `PocketView` instance that can then be filled in with some content by passing arguments to it like a method call.

// +
var pocketView = i("Hello!");

display(pocketView);
// -

// A `PocketView` instance can also be decorated with attributes using square brackets.

// +
var pocketView = span[style:"font-style:italic"]("Hello!");

display(pocketView);
// -

// You'll notice that if you pass a `string` to `PocketView`, it will be HTML encoded for you:

// +
PocketView pocketView = span("<div>This string looks like HTML but it will be HTML encoded.</div>");

display(pocketView);

display("Have a look at the actual HTML:");

display(pocketView.ToString());
// -

// If you don't want the content to be encoded, simply pass it wrapped in a type that implements `IHtmlContent`.

// +
var htmlContent = HTML("<i>This won't be HTML encoded.</i>");

PocketView pocketView = span(
    htmlContent
);

display(pocketView);
// -

// You can pass other types of objects of into a `PocketView` as well. When you do this, they're formatted using the plain text formatter, which by default expands the object's properties.

// +
PocketView pocketView = b(
    new { Fruit = "apple", Texture = "smooth" }
);

display(pocketView);
// -

// ## Magic Commands
//
// There are also several magic commands that can be used to output HTML in your .NET notebook.
//
// You can output HTML...

// +
%%html

<b>Hello!</b>
// -

// ...or run JavaScript...

// +
%%javascript

alert("hello");
// -

// ...or render Markdown.

// +
%%markdown

Write a **list** ...
* first
* second

...or a _table_...

|Fruit    |Texture |
|---------|--------|
|apple    |smooth  |
|durian   |bumpy   |
// -

// ---
// **_See also_**
// * [Object formatters](Object%20formatters.ipynb)
// * [Displaying output](Displaying%20output.ipynb)
NotebookExamples/csharp/Docs/HTML.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="ws1OvQ3dJhxS"
# ## Exp 9 Write a program to implement tic tac toe game using Q-learning agent.
# ### 1818143 <NAME>
#

# + colab={"base_uri": "https://localhost:8080/"} id="Q6oHzSxKUxln"
import numpy as np
import pickle

BOARD_ROWS = 3
BOARD_COLS = 3


class State:
    """Tic-tac-toe environment: board, turn-taking, win detection, self-play."""

    def __init__(self, p1, p2):
        self.board = np.zeros((BOARD_ROWS, BOARD_COLS))
        self.p1 = p1
        self.p2 = p2
        self.isEnd = False
        self.boardHash = None
        # init p1 plays first; p1 plays symbol 1, p2 plays symbol -1
        self.playerSymbol = 1

    # get unique hash of current board state (string form of the flat board)
    def getHash(self):
        self.boardHash = str(self.board.reshape(BOARD_COLS * BOARD_ROWS))
        return self.boardHash

    def winner(self):
        """Return 1/-1 for a p1/p2 win, 0 for a tie, None if the game goes on."""
        # rows
        for i in range(BOARD_ROWS):
            if sum(self.board[i, :]) == 3:
                self.isEnd = True
                return 1
            if sum(self.board[i, :]) == -3:
                self.isEnd = True
                return -1
        # columns
        for i in range(BOARD_COLS):
            if sum(self.board[:, i]) == 3:
                self.isEnd = True
                return 1
            if sum(self.board[:, i]) == -3:
                self.isEnd = True
                return -1
        # diagonals
        diag_sum1 = sum([self.board[i, i] for i in range(BOARD_COLS)])
        diag_sum2 = sum([self.board[i, BOARD_COLS - i - 1] for i in range(BOARD_COLS)])
        diag_sum = max(abs(diag_sum1), abs(diag_sum2))
        if diag_sum == 3:
            self.isEnd = True
            if diag_sum1 == 3 or diag_sum2 == 3:
                return 1
            else:
                return -1

        # tie: no winner and no available positions
        if len(self.availablePositions()) == 0:
            self.isEnd = True
            return 0
        # not end
        self.isEnd = False
        return None

    def availablePositions(self):
        """Return all empty cells as (row, col) tuples."""
        positions = []
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                if self.board[i, j] == 0:
                    positions.append((i, j))  # need to be tuple
        return positions

    def updateState(self, position):
        """Place the current player's symbol and switch turns."""
        self.board[position] = self.playerSymbol
        # switch to another player
        self.playerSymbol = -1 if self.playerSymbol == 1 else 1

    # only when game ends
    def giveReward(self):
        """Backpropagate terminal rewards; a tie rewards p2 more than p1."""
        result = self.winner()
        if result == 1:
            self.p1.feedReward(1)
            self.p2.feedReward(0)
        elif result == -1:
            self.p1.feedReward(0)
            self.p2.feedReward(1)
        else:
            # tie: asymmetric rewards discourage p1 (first mover) from settling
            self.p1.feedReward(0.1)
            self.p2.feedReward(0.5)

    # board reset
    def reset(self):
        self.board = np.zeros((BOARD_ROWS, BOARD_COLS))
        self.boardHash = None
        self.isEnd = False
        self.playerSymbol = 1

    def play(self, rounds=100):
        """Self-play training loop between two learning agents."""
        for i in range(rounds):
            if i % 1000 == 0:
                print("Rounds {}".format(i))
            while not self.isEnd:
                # Player 1
                positions = self.availablePositions()
                p1_action = self.p1.chooseAction(positions, self.board, self.playerSymbol)
                # take action and update board state
                self.updateState(p1_action)
                board_hash = self.getHash()
                self.p1.addState(board_hash)
                # check board status if it is end
                win = self.winner()
                if win is not None:
                    # ended with p1 either win or draw
                    self.giveReward()
                    self.p1.reset()
                    self.p2.reset()
                    self.reset()
                    break
                else:
                    # Player 2
                    positions = self.availablePositions()
                    p2_action = self.p2.chooseAction(positions, self.board, self.playerSymbol)
                    self.updateState(p2_action)
                    board_hash = self.getHash()
                    self.p2.addState(board_hash)
                    win = self.winner()
                    if win is not None:
                        # ended with p2 either win or draw
                        self.giveReward()
                        self.p1.reset()
                        self.p2.reset()
                        self.reset()
                        break

    # play with human
    def play2(self):
        """Interactive game: p1 is the trained agent, p2 a HumanPlayer."""
        while not self.isEnd:
            # Player 1 (agent)
            positions = self.availablePositions()
            p1_action = self.p1.chooseAction(positions, self.board, self.playerSymbol)
            self.updateState(p1_action)
            self.showBoard()
            win = self.winner()
            if win is not None:
                if win == 1:
                    print(self.p1.name, "wins!")
                else:
                    print("tie!")
                self.reset()
                break
            else:
                # Player 2 (human) — HumanPlayer.chooseAction takes positions only
                positions = self.availablePositions()
                p2_action = self.p2.chooseAction(positions)
                self.updateState(p2_action)
                self.showBoard()
                win = self.winner()
                if win is not None:
                    if win == -1:
                        print(self.p2.name, "wins!")
                    else:
                        print("tie!")
                    self.reset()
                    break

    def showBoard(self):
        # p1: x  p2: o
        for i in range(0, BOARD_ROWS):
            print('-------------')
            out = '| '
            for j in range(0, BOARD_COLS):
                if self.board[i, j] == 1:
                    token = 'x'
                if self.board[i, j] == -1:
                    token = 'o'
                if self.board[i, j] == 0:
                    token = ' '
                out += token + ' | '
            print(out)
        print('-------------')


class Player:
    """Q-learning agent with epsilon-greedy action selection over board hashes."""

    def __init__(self, name, exp_rate=0.3):
        self.name = name
        self.states = []  # record all positions taken this game
        self.lr = 0.2
        self.exp_rate = exp_rate          # exploration probability
        self.decay_gamma = 0.9
        self.states_value = {}            # state hash -> learned value

    def getHash(self, board):
        boardHash = str(board.reshape(BOARD_COLS * BOARD_ROWS))
        return boardHash

    def chooseAction(self, positions, current_board, symbol):
        """Epsilon-greedy: random move with prob exp_rate, else best known value."""
        if np.random.uniform(0, 1) <= self.exp_rate:
            # take random action
            idx = np.random.choice(len(positions))
            action = positions[idx]
        else:
            value_max = -999
            for p in positions:
                next_board = current_board.copy()
                next_board[p] = symbol
                next_boardHash = self.getHash(next_board)
                value = 0 if self.states_value.get(next_boardHash) is None else self.states_value.get(next_boardHash)
                if value >= value_max:
                    value_max = value
                    action = p
        return action

    # append a hash state
    def addState(self, state):
        self.states.append(state)

    def feedReward(self, reward):
        """TD-style backup: walk the episode backwards, pulling each state's
        value toward the (discounted) reward, which then becomes the reward
        for the preceding state."""
        for st in reversed(self.states):
            if self.states_value.get(st) is None:
                self.states_value[st] = 0
            self.states_value[st] += self.lr * (self.decay_gamma * reward - self.states_value[st])
            reward = self.states_value[st]

    def reset(self):
        self.states = []

    def savePolicy(self):
        """Persist the learned state-value table with pickle."""
        # FIX: use a context manager so the handle is closed even if dump raises
        with open('policy_' + str(self.name), 'wb') as fw:
            pickle.dump(self.states_value, fw)

    def loadPolicy(self, file):
        """Load a previously saved state-value table."""
        # FIX: context manager, same reasoning as savePolicy
        with open(file, 'rb') as fr:
            self.states_value = pickle.load(fr)


class HumanPlayer:
    """Interactive player: prompts for a (row, col) move until it is legal."""

    def __init__(self, name):
        self.name = name

    def chooseAction(self, positions):
        while True:
            row = int(input("Input your action row:"))
            col = int(input("Input your action col:"))
            action = (row, col)
            if action in positions:
                return action

    # no learning: the State protocol calls these, so keep them as no-ops
    def addState(self, state):
        pass

    def feedReward(self, reward):
        pass

    def reset(self):
        pass


if __name__ == "__main__":
    # training
    p1 = Player("p1")
    p2 = Player("p2")

    st = State(p1, p2)
    print("training...")
    st.play(1000)
    # BUG FIX: the policy was loaded below but never saved after training,
    # so loadPolicy("policy_p1") raised FileNotFoundError on a fresh run.
    p1.savePolicy()

    # play with human
    p1 = Player("computer", exp_rate=0)
    p1.loadPolicy("policy_p1")
    p2 = HumanPlayer("human")
    st = State(p1, p2)
    st.play2()
1818113_ML_Exp9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Regression on gene-expression data: a small dense network versus a
# linear-regression baseline, compared by Pearson correlation on a holdout set.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Hyper-parameters
test_size = 0.1
batch_size = 1024
epochs = 20

# First column of the CSV is an index; the next 943 columns are features,
# the final column is the regression target.
data = pd.read_csv('gene_expression_regression.csv')
data_array = data.iloc[:, 1:].values
X = data_array[:, :943]
Y = data_array[:, -1]
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=test_size)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(X.shape[1],)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
# NOTE(review): tanh clamps predictions to (-1, 1) — confirm the targets are
# scaled to that range; otherwise a linear output is the usual regression choice.
model.add(Dense(1, activation='tanh'))

# +
model.summary()

model.compile(loss='mean_squared_error',
              optimizer=keras.optimizers.Adam(),
              metrics=['mae'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
# BUG FIX: score[1] is the MAE metric configured above, not accuracy.
print('Test MAE:', score[1])
# -

y_pred = model.predict(x_test)

y_test

# Pearson correlation between predictions and ground truth.
np.corrcoef(y_test.flatten(), y_pred.flatten())

# # Baseline linear regression model result

from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(x_train, y_train)
np.corrcoef(reg.predict(x_test).flatten(), y_test)
3.Regression_gene_expression/Gene_expression_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # ![alt text](http://datascience.uci.edu/wp-content/uploads/sites/2/2014/09/data_science_logo_with_image1.png 'UCI_data_science') # ### Goals of this Lesson # - Extend the regression framework to support classification # - Logistic Regression # - Training with Gradient Descent # - Training with Newton's Method # # - Implement... # - The Logistic function # - A function to compute the Hessian matrix # - An instantiation of SciKit-Learn's Logistic regression class # # ### References # - Chapter 4 of [*Elements of Statistical Learning* by Hastie, <NAME>](http://web.stanford.edu/~hastie/local.ftp/Springer/OLD/ESLII_print4.pdf) # - [A Few Useful Things to Know about Machine Learning](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf) # - [SciKit-Learn's Logistic Regression Documentation](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) # # # ## 0. Python Preliminaries # As usual, first we need to import Numpy, Pandas, MatPlotLib... from IPython.display import Image import matplotlib.pyplot as plt import numpy as np import pandas as pd import time # %matplotlib inline # I've created two functions that we'll use later to visualize which datapoints are being assigned to which classes. They are a bit messy and not essential to the material so don't worry about understanding them. I'll be happy to explain them to anyone interested during a break or after the session. 
# +
from matplotlib.colors import ListedColormap

# A somewhat complicated function to make pretty plots
def plot_classification_data(data1, data2, beta, logistic_flag=False):
    """Shade the feature plane by the model's output and scatter both classes.

    data1/data2: (n, 2) arrays of class-0 / class-1 points
    beta: 3x1 coefficient vector (two features + bias)
    logistic_flag: pass the grid scores through the logistic function first
    """
    plt.figure()
    grid_size = .2
    features = np.vstack((data1, data2))
    # generate a grid over the plot
    x_min, x_max = features[:, 0].min() - .5, features[:, 0].max() + .5
    y_min, y_max = features[:, 1].min() - .5, features[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, grid_size), np.arange(y_min, y_max, grid_size))
    # color the grid based on the predictions
    if logistic_flag:
        Z = logistic(np.dot(np.c_[xx.ravel(), yy.ravel(), np.ones(xx.ravel().shape[0])], beta))
        colorbar_label = r"Value of f($X \beta)$"
    else:
        Z = np.dot(np.c_[xx.ravel(), yy.ravel(), np.ones(xx.ravel().shape[0])], beta)
        colorbar_label = r"Value of $X \beta$"
    Z = Z.reshape(xx.shape)
    background_img = plt.pcolormesh(xx, yy, Z, cmap=plt.cm.coolwarm)
    # Also plot the training points
    # BUG FIX: originally scattered the globals class1_features/class2_features,
    # silently ignoring the data1/data2 parameters; use the parameters.
    plt.scatter(data1[:, 0], data1[:, 1], c='b', edgecolors='k', s=70)
    plt.scatter(data2[:, 0], data2[:, 1], c='r', edgecolors='k', s=70)
    plt.title('Data with Class Prediction Intensities')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    color_bar = plt.colorbar(background_img, orientation='horizontal')
    color_bar.set_label(colorbar_label)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.show()


# Another messy looking function to make pretty plots of basketball courts
def visualize_court(log_reg_model, court_image = './data/nba_experiment/nba_court.jpg'):
    """Overlay a fitted classifier's make/miss regions on an NBA half-court image."""
    two_class_cmap = ListedColormap(['#FFAAAA', '#AAFFAA'])  # light red for miss, light green for make
    x_min, x_max = 0, 50  # width (feet) of NBA court
    y_min, y_max = 0, 47  # length (feet) of NBA half-court
    grid_step_size = 0.2
    grid_x, grid_y = np.meshgrid(np.arange(x_min, x_max, grid_step_size),
                                 np.arange(y_min, y_max, grid_step_size))
    grid_predictions = log_reg_model.predict(np.c_[grid_x.ravel(), grid_y.ravel()])
    grid_predictions = grid_predictions.reshape(grid_x.shape)
    fig, ax = plt.subplots()
    court_image = plt.imread(court_image)
    ax.imshow(court_image, interpolation='bilinear', origin='lower', extent=[x_min,x_max,y_min,y_max])
    ax.imshow(grid_predictions, cmap=two_class_cmap, interpolation = 'nearest',
              alpha = 0.60, origin='lower', extent=[x_min,x_max,y_min,y_max])
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.title( "Make / Miss Prediction Boundaries" )
    plt.show()
# -

# ## 1. Classes as Targets
#
# Now that we've seen how to train and evaluate a linear model for real-valued responses, next we turn to classification. At first glance, jumping from regression to classification seems trivial. Say there are two classes, the first denoted by 0 and the second by 1. We could just set each $y_{i}$ to 0 or 1 according to its class membership and fit a linear model just as before.
#
# _** Here's an example doing just that on some artificial data... **_

# +
### function for shuffling the data and labels
def shuffle_in_unison(a, b):
    """Shuffle arrays a and b in place with the same permutation."""
    rng_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(rng_state)  # replay the identical RNG state for b
    np.random.shuffle(b)

### calculate classification errors
# return a percentage: (number misclassified)/(total number of datapoints)
def calc_classification_error(predictions, class_labels):
    n = predictions.size
    num_of_errors = 0.
    # FIX: range instead of the Python-2-only xrange (identical iteration,
    # also works under Python 3)
    for idx in range(n):
        if (predictions[idx] >= 0.5 and class_labels[idx]==0) or (predictions[idx] < 0.5 and class_labels[idx]==1):
            num_of_errors += 1
    return num_of_errors/n

# set the random number generator for reproducability
np.random.seed(182)

#### create artificial data
N = 400
D = 2

# Sample the features from a Multivariate Normal Dist.
# Two Gaussian clouds (shared covariance), labeled 0/1, fit by ordinary
# least squares as if the labels were real-valued responses.
# NOTE(review): this notebook runs under a Python 2 kernel — N/2 here is
# integer division; under Python 3 it would produce a float and fail.
mean1 = [13,5]
mean2 = [5,5]
covariance = [[13,5],[5,13]]
class1_features = np.random.multivariate_normal(mean1,covariance,N/2)
class2_features = np.random.multivariate_normal(mean2,covariance,N/2)
features = np.vstack((class1_features, class2_features))
# add column of ones for bias term
features = np.hstack((features,np.ones((N,1))))

# Set the class labels
class1_labels = [0]*(N/2)
class2_labels = [1]*(N/2)
class_labels = class1_labels+class2_labels

# shuffle the data (same permutation applied to features and labels)
shuffle_in_unison(features, class_labels)
class_labels = np.array(class_labels)[np.newaxis].T  # column vector (N, 1)

### fit the linear model --- OLS Solution: beta = (X^T X)^-1 X^T y
beta = np.dot(np.linalg.inv(np.dot(features.T, features)),np.dot(features.T,class_labels))

### compute error on training data (prediction >= 0.5 counts as class 1)
predictions = np.dot(features, beta)
print "Classification Error on Training Set: %.2f%%" %(calc_classification_error(predictions, class_labels) * 100)

### generate a plot
plot_classification_data(class1_features, class2_features, beta)
# -

# That worked okay. 9.75% error is respectable. Yet, let's think a bit harder about what's going on...
# * It seems a bit arbitary to set the class labels to 0 vs. 1. Why couldn't we have set them to -1 vs. +1? Or 500 vs. 230? The responses don't have the same intrinsic meaning they did before. Now the labels represent exclusive class membership whereas before they represented physical quantities (baseball player's salary, for example).
# * During training, we're optimizing squared error, but the metric we truly care about is classification percentage. Squared distance seems inappropriate especially when it's not even clear to what value the responses should be set.
#
# Here's an idea: since we care primarily about classification error, let's make that our loss function...
#
# \begin{eqnarray*}
# \mathcal{L}_{\mathrm{class}} = \begin{cases} 1, & \text{if $y_{i}\ne$round($\hat y_{i}$).}\\0, & \text{otherwise}.\end{cases}
# \end{eqnarray*}
#
# where $\hat y_{i}$ is our model's prediction of label $y_{i} \in \{0,1\}$ and round() sends $\hat y_{i}$ to 0 or 1, whichever is closer. Great. Now all we have to do is perform gradient descent to train the model...wait a minute...$\mathcal{L}_{\mathrm{class}}$ isn't differentiable.
#
# Let's consider another loss function:
#
# \begin{eqnarray*}
# \mathcal{L} = \sum_{i=1}^{N} -y_{i} \log \hat y_{i} - (1-y_{i}) \log (1-\hat y_{i})
# \end{eqnarray*}
#
# where, again, $\hat y_{i}$ is our model's prediction of label $y_{i} \in \{0,1\}$. Here $\log$ will refer to the natural logarithm, base $e$. This is called the *cross-entropy* error function. Notice it's well-suited for classification in that it is directly optimizing towards $0$ and $1$. To see this, let $y_{i}=1$. In that case, the second term is zero (due to the $1-y_{i}$ coefficient) and the loss becomes $\mathcal{L}= - \log \hat y_{i}$. Recall that $-\log \hat y_{i} = 0$ when $\hat y_{i}=1$ and that $-\log \hat y_{i} = \infty$ when $\hat y_{i}=0$. Thus, we are encouraging $\hat y_{i}$ to become equal to $1$, its class label, and incurring penalty the more it moves towards $0$.
#
# On an advanced note: Cross-entropy loss still may seem arbitrary to some readers. It is derived by taking the negative logarithm of the Bernoulli distribution's density function, which has support {0,1}. Therefore, we can think of each class label as the result of a Bernoulli trial--a parameterized coin flip, essentially. **Many loss functions are merely the negative logarithm of some probability density function.** Squared error is derived by taking the $-\log$ of the Normal density function.
#
#
#
# ## 2. Modifying the Linear Model
#
# Now that we have our loss function and proper labels, we turn to the model itself, represented by the parameter $\hat y$ above.
What if we keep defining $\hat y$ just as we did for linear regression?
# \begin{equation*}
# \hat y_{i} = \beta_0 + \beta_1 x_{i,1} + \dots + \beta_D x_{i,D} = \mathbf{x}_i^T \mathbf{\beta}
# \end{equation*}
# Notice parameterizing $\hat y_{i}$ with $\mathbf{x}_i^T\beta$ doesn't work since the value would be unconstrained and result in the loss being undefined if $\hat y\le 0$. Thus, we need a function $f$ such that $f:\mathbb{R} \mapsto (0,1)$. We can probably think-up many functions that have a range on this interval so we'll limit the functions we can use by specifying two more requirements: the function must *(1)* be differentiable (in order to perform gradient descent) and *(2)* have a probabilistic interpretation (to think of the output as the probability the input is in class 1).
#
# Cumulative Distribution Functions (CDFs) have all of these nice properties. They 'squeeze' their input onto $(0,1)$, are differentiable (since that's how a pdf is derived) and have a probabilistic interpretation. In this case, we can use any CDF as long as it has support on $(-\infty, +\infty)$ since this is the range of $X_i^T\beta$.
#
# Choosing which CDF to use can be a hard decision since each choice drags along assumptions we don't have time to go into here. We'll choose the Logistic Distribution's CDF:
# \begin{equation*}
# f(z; 0, 1) = \frac{1}{1+e^{-z}}.
# \end{equation*} # # ![alt text](http://deeplearning.net/software/theano/_images/logistic.png) # Tradition partly dictates this choice, but **it does provide the nice interpretation that $x_i^T\beta$ is modeling the 'log odds':** # # \begin{eqnarray*} # \log \frac{\hat y}{1-\hat y} &=& \log \frac{f(z; 0, 1)}{1-f(z; 0, 1)} \\ &=& \log f(z; 0, 1) - \log (1-f(z; 0, 1) )\\ &=& -\log (1+e^{-z}) - \log (1-(1+e^{-z})^{-1}) \\ &=& -\log (1+e^{-z}) - \log e^{-z} + \log (1+e^{-z}) \\ &=& - \log e^{-z} \\ &=& z \\ &=& \mathbf{x}_i^T \mathbf{\beta} \end{eqnarray*} # # This use of the Logistic Distribution is where Logistic Regression gets its name. As a side note before proceeding, using the Normal CDF instead of the Logistic is called 'Probit Regression,' the second most popular regression framework. # ## <span style="color:red">STUDENT ACTIVITY (5 MINS)</span> # The Logistic transformation function is the key to extending regression to classification. Below you'll see the function *def logistic(z)*. Complete it by filling in the logistic function and then graph the output. # + # define the transformation function def logistic(z): # TO DO: return the output of the logistic function return 1.0/(1 + np.exp(-z)) # a few tests to make sure your function is working print "Should print 0.5:" print logistic(0) print print "Should print 0.81757...:" print logistic(1.5) print # needs to handle arrays too print "Should print [ 0.450166 0.5124974 0.98201379]:" print logistic(np.array([-.2,.05,4])) print # graph the function z = np.linspace(-6,6,50) logistic_out = logistic(z) plt.figure() # TO DO: write the line of code to plot the function plt.plot(z, logistic_out, 'b-o') plt.title("Logistic Function") plt.xlabel('Input') plt.ylabel('Output') plt.show() # - # ## 3. 
# Logistic Regression: A Summary
#
# _**Data**_
#
# We observe pairs $(\mathbf{x}_{i},y_{i})$ where
# \begin{eqnarray*}
# y_{i} \in \{ 0, 1\} &:& \mbox{class label} \\
# \mathbf{x}_{i} = (1, x_{i,1}, \dots, x_{i,D}) &:& \mbox{set of $D$ explanatory variables (aka features) and a bias term }
# \end{eqnarray*}
#
# _** Parameters**_
#
# \begin{eqnarray*}
# \mathbf{\beta}^{T} = (\beta_{0}, \dots, \beta_{D}) : \mbox{values encoding the relationship between the features and label}
# \end{eqnarray*}
#
# _** Transformation Function **_
#
# \begin{equation*}
# f(z_{i}=\mathbf{x}_{i} \mathbf{\beta} ) = (1+e^{-\mathbf{x}_{i} \mathbf{\beta} })^{-1}
# \end{equation*}

# _**Error Function**_
#
# \begin{eqnarray*}
# \mathcal{L} = \sum_{i=1}^{N} -y_{i} \log f(\mathbf{x}_{i} \mathbf{\beta} ) - (1-y_{i}) \log (1-f(\mathbf{x}_{i} \mathbf{\beta} ))
# \end{eqnarray*}

### compute the cross-entropy error
# labels: array (or column vector) of true class labels in {0,1}
# f: predictions with the SAME shape as labels (output of logistic function)
def cross_entropy(labels, f):
    # BUG FIX: the original built the "ones" terms with np.ones(N), which
    # (a) silently depended on the global N, and (b) broadcast the (N,)
    # vector against the (N,1) label column into an N x N matrix, inflating
    # the second term by a factor of N. np.ones_like keeps every term the
    # same shape as its input, for both 1-D and column-vector arguments.
    return np.sum(-1*np.multiply(labels, np.log(f))
                  - np.multiply(np.ones_like(labels) - labels,
                                np.log(np.ones_like(f) - f)))

# _** Learning $\beta$ **_
#
# Like Linear Regression, learning a Logistic Regression model will entail minimizing the error function $\mathcal{L}$ above. Can we solve for $\beta$ in closed form?
# Let's look at the derivative of $\mathcal{L}$ with respect to $\beta$:
#
# \begin{eqnarray*}
# \frac{\partial \mathcal{L}_{i}}{\partial \mathbf{\beta}} &=& \frac{\partial \mathcal{L}_{i}}{\partial f(z_{i})} \frac{\partial f(z_{i})}{\partial z_{i}} \frac{\partial z_{i}}{\partial \mathbf{\beta}}\\
# &=& \left[\frac{-y_{i}}{f(\mathbf{x}_{i} \mathbf{\beta})} - \frac{y_{i}-1}{1-f(\mathbf{x}_{i} \mathbf{\beta})} \right] f(\mathbf{x}_{i} \mathbf{\beta})(1-f(\mathbf{x}_{i} \mathbf{\beta}))\mathbf{x}_{i}\\
# &=& [-y_{i}(1-f(\mathbf{x}_{i} \mathbf{\beta} )) - (y_{i}-1)f(\mathbf{x}_{i} \mathbf{\beta} )]\mathbf{x}_{i}\\
# &=& [f(\mathbf{x}_{i} \mathbf{\beta} ) - y_{i}]\mathbf{x}_{i}
# \end{eqnarray*}

### compute the gradient (derivative w.r.t. Beta)
# features: NxD feature matrix
# labels: column vector of true class labels
# f: column vector of predictions (output of the logistic function)
def compute_Gradient(features, labels, f):
    # Per-sample residual (f_i - y_i), broadcast across each feature row,
    # then summed over samples; returned as a Dx1 column vector.
    residual = f - labels
    grad_row = (residual * features).sum(axis=0)
    return grad_row[np.newaxis].T

np.sum([1,2],0)

# ?np.sum

# We see that the first derivative contains the term $f(X_{i}\beta)$, meaning the gradient depends on $\beta$ in some non-linear way. We have no choice but to use the Gradient Descent algorithm:
# - Randomly initialize $\beta$
# - Until $\alpha || \nabla \mathcal{L} || < tol $:
#     - $\mathbf{\beta}_{t+1} = \mathbf{\beta}_{t} - \alpha \nabla_{\mathbf{\beta}} \mathcal{L}$
#
# _** Putting it all together in a simple example...
**_

# +
# set the random number generator for reproducibility
np.random.seed(49)

# Randomly initialize the Beta vector: one standard-normal draw of the
# 3 coefficients (bias + 2 features); .T makes it a 3x1 column
beta = np.random.multivariate_normal([0,0,0], [[1,0,0],[0,1,0],[0,0,1]], 1).T

# Initialize the step-size (learning rate)
alpha = 0.00001

# Initialize the gradient (infinite so the first while-test passes)
grad = np.infty

# Set the tolerance on the size of the parameter update
tol = 1e-6

# Initialize error tracking; error[-1] is always the most recent loss
old_error = 0
error = [np.infty]

# Run Gradient Descent
# NOTE(review): logistic, features, class_labels, calc_classification_error,
# plot_classification_data, class1_features, class2_features and time are
# defined in earlier notebook cells.
start_time = time.time()
iter_idx = 1
# loop until gradient updates become small (or the 300-iteration cap is hit)
while (alpha*np.linalg.norm(grad) > tol) and (iter_idx < 300):
    f = logistic(np.dot(features,beta))  # current predictions, Nx1
    old_error = error[-1]
    # track the error
    error.append(cross_entropy(class_labels, f))
    grad = compute_Gradient(features, class_labels, f)
    # update parameters: step against the gradient
    beta = beta - alpha*grad
    iter_idx += 1
end_time = time.time()

print "Training ended after %i iterations, taking a total of %.2f seconds." %(iter_idx, end_time-start_time)
print "Final Cross-Entropy Error: %.2f" %(error[-1])

# compute error on training data
predictions = logistic(np.dot(features, beta))
print "Classification Error on Training Set: %.2f%%" %(calc_classification_error(predictions, class_labels) * 100)

# generate the plot
plot_classification_data(class1_features, class2_features, beta, logistic_flag=True)
# -

# exploratory cell: shape of the random-init draw (3x1 column)
np.random.multivariate_normal([0,0,0], [[1,0,0],[0,1,0],[0,0,1]], 1).T.shape

# ## 4. Newton's Method
#
# Choosing the step-size, $\alpha$, can be painful since there is no principled way to set it. We have little intuition for what parameter space really looks like and therefore no sense of how to move most efficiently. Knowing the curvature of the space will solve this problem (to some extent).
Therefore, we arrive at Newton's Method:
#
# \begin{equation*}
# \beta_{t+1} = \beta_{t} - (\frac{\partial^{2} \mathcal{L}}{\partial \beta \partial \beta^{T}})^{-1} \nabla_{\beta} \mathcal{L}
# \end{equation*}
#
# where $(\frac{\partial^{2} \mathcal{L}}{\partial \beta \partial \beta^{T}})^{-1}$ is the inverse of the matrix of second derivatives, also known as the Hessian Matrix. For Logistic regression, the Hessian is
#
# \begin{equation*}
# \frac{\partial^{2} \mathcal{L}}{\partial \beta \partial \beta^{T}} = \mathbf{X}^{T}\mathbf{A}\mathbf{X}
# \end{equation*}
#
# where $\mathbf{A}= \mathrm{diag}(f(X_{i}\beta)(1-f(X_{i}\beta)))$, a matrix with $f''$ along its diagonal.
#
# Our new parameter update is:
#
# \begin{eqnarray*}
# \beta_{t+1} &=& \beta_{t} - (\mathbf{X}^{T}\mathbf{A}\mathbf{X})^{-1}\mathbf{X}^{T}[f(\mathbf{X}\beta) - \mathbf{y}]
# \end{eqnarray*}
#
# As you can see, we no longer need to specify a step-size. We've replaced $\alpha$ with $(\frac{\partial^{2} \mathcal{L}}{\partial \beta \partial \beta^{T}})^{-1}$ and everything else stays the same.

# ## <span style="color:red">STUDENT ACTIVITY (10 MINS)</span>
#
# Write a function that computes the Hessian matrix ($\mathbf{X}^{T}\mathbf{A}\mathbf{X}$).

# +
def compute_Hessian(features, f):
    """Return the DxD Hessian X^T A X of the cross-entropy loss.

    features: NxD feature matrix
    f: Nx1 column of logistic outputs
    """
    # X = feature matrix, size NxD),
    # f = predictions (logistic outputs), size Nx1
    # TO DO: return the Hessian matrix, size DxD
    n = len(features)
    A = np.multiply(f,np.ones(n)[np.newaxis].T-f)   # elementwise f*(1-f), Nx1
    A = np.diag(A.T[0])                             # spread onto the diagonal of an NxN matrix
    return np.dot(features.T, np.dot(A ,features))  # X^T A X, size DxD

# a few tests to make sure your function is working
X = np.array([[1,2],[3,4],[5,6]])
f = np.array([.1,.3,.5])[np.newaxis].T
print "Should print [[ 8.23 10.2 ];[ 10.2 12.72]]:"
print compute_Hessian(X,f)
print

X = np.array([[1],[4],[6]])
f = np.array([.01,.13,.55])[np.newaxis].T
print "Should print [[ 10.7295]]:"
print compute_Hessian(X,f)
# -

# _** Let's try Newton's Method on our simple example...
**_

# +
# set the random number generator for reproducibility
np.random.seed(1801843607)

# Save the errors from the gradient-descent run above for the comparison plot
no_Newton_errors = error

# Randomly initialize the Beta vector (smaller variance than the GD run)
beta = np.random.multivariate_normal([0,0,0], [[.1,0,0],[0,.1,0],[0,0,.1]], 1).T

# Initialize error tracking; error[-1] is always the most recent loss
old_error = 0
error = [np.infty]

# Run Newton's Method
# NOTE(review): logistic, features, class_labels, tol and the helper
# functions come from earlier notebook cells.
start_time = time.time()
iter_idx = 1
# Loop until error doesn't change (as opposed to gradient)
while (abs(error[-1] - old_error) > tol) and (iter_idx < 300):
    f = logistic(np.dot(features,beta))
    old_error = error[-1]
    # track the error
    error.append(cross_entropy(class_labels, f))
    grad = compute_Gradient(features, class_labels, f)
    hessian = compute_Hessian(features,f)
    # update parameters via Newton's method: beta <- beta - H^{-1} grad
    # NOTE(review): np.linalg.solve(hessian, grad) would be the numerically
    # preferred way to form this inverse-times-vector product.
    beta = beta - np.dot(np.linalg.inv(hessian),grad)
    iter_idx += 1
end_time = time.time()

print "Training ended after %i iterations, taking a total of %.2f seconds." %(iter_idx, end_time-start_time)
print "Final Cross-Entropy Error: %.2f" %(error[-1])

# compute the classification error on training data
predictions = logistic(np.dot(features, beta))
print "Classification Error on Training Set: %.2f%%" %(calc_classification_error(predictions, class_labels) * 100)

# generate the plot
plot_classification_data(class1_features, class2_features, beta, logistic_flag=True)
# -

# Let's look at the training progress to see how much more efficient Newton's method is.

# plot difference between with vs without Newton
plt.figure()
# grad descent w/ step size
plt.plot(range(len(no_Newton_errors)), no_Newton_errors, 'k-', linewidth=4, label='Without Newton')
# newton's method
plt.plot(range(len(error)), error, 'g-', linewidth=4, label='With Newton')
plt.ylim([0,300000])
plt.xlim([0,150])
plt.legend()
plt.title("Newton's Method vs. Gradient Descent w/ Step Size")
plt.xlabel("Training Iteration")
plt.ylabel("Cross-Entropy Error")
plt.show()

# ## 5.
Logistic Regression with SciKit-Learn
#
# [Here is the documentation for SciKit-Learn's implementation of Logistic Regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
#
# It's quite easy to use. Let's jump right in and repeat the above experiments.

# +
from sklearn.linear_model import LogisticRegression

# set the random number generator for reproducibility
np.random.seed(75)

# Initialize the model (default sklearn hyper-parameters)
skl_LogReg = LogisticRegression()

# Train it
# NOTE(review): features, class_labels, calc_classification_error,
# plot_classification_data and time come from earlier notebook cells.
start_time = time.time()
skl_LogReg.fit(features, np.ravel(class_labels))
end_time = time.time()
print "Training ended after %.4f seconds." %(end_time-start_time)

# compute the classification error on training data
predictions = skl_LogReg.predict(features)
print "Classification Error on Training Set: %.2f%%" %(calc_classification_error(predictions, class_labels) * 100)

# generate the plot
plot_classification_data(class1_features, class2_features, skl_LogReg.coef_.T, logistic_flag=True)
# -

# # Experiments
#
# ## 6. Dataset #1: NBA Shot Outcomes
#
# The first real dataset we'll tackle is one describing the location and outcome of shots taken in professional basketball games. Let's use Pandas to load and examine the data.

nba_shot_data = pd.read_csv('./data/nba_experiment/NBA_xy_features.csv')
nba_shot_data.head()

nba_shot_data.describe()

# Simple enough. Now let's train a Logistic Regression model on it, leaving out a test set.

# +
# split data into train and test (80/20 split, in file order)
train_set_size = int(.80*len(nba_shot_data))
# NOTE(review): DataFrame.ix is deprecated in modern pandas -- .loc/.iloc
# would be the replacements if this notebook is ever upgraded.
train_features = nba_shot_data.ix[:train_set_size,['x_Coordinate','y_Coordinate']]
test_features = nba_shot_data.ix[train_set_size:,['x_Coordinate','y_Coordinate']]
train_class_labels = nba_shot_data.ix[:train_set_size,['shot_outcome']]
test_class_labels = nba_shot_data.ix[train_set_size:,['shot_outcome']]

# Train it
start_time = time.time()
skl_LogReg.fit(train_features, np.ravel(train_class_labels))
end_time = time.time()
print "Training ended after %.2f seconds." %(end_time-start_time)

# compute the classification error on the held-out test data
predictions = skl_LogReg.predict(test_features)
print "Classification Error on the Test Set: %.2f%%" %(calc_classification_error(predictions, np.array(test_class_labels)) * 100)

# compute the baseline error since the classes are imbalanced
# (the fraction of made shots, i.e. the error of always predicting "missed")
print "Baseline Error: %.2f%%" %(np.sum(test_class_labels)/len(test_class_labels)*100)

# visualize the boundary on the basketball court
visualize_court(skl_LogReg)
# -

# Not bad. We're beating the random baseline of 45% error. However, visualizing the decision boundary exposes a systemic problem with using a linear model on this dataset: it is not powerful enough to adapt to the geometry of the court. This is a domain-specific constraint that should be considered when selecting the model and features. For instance, a Gaussian-based classifier works a bit better, achieving 39.02% error. Its decision boundary is visualized below.

# <img src="https://raw.githubusercontent.com/enalisnick/NBA_shot_analysis/master/results/spatial_features_results/Gaussian_Mixture_Model.png" alt="" style="width: 250px;"/>

# Can we do better by adding more features? For instance, if we knew the position (Guard vs. Forward vs. Center) of the player taking the shot, would that help? Let's try. First, load a new dataset.

# +
# first we need to extract the file from the zip
import zipfile
# NOTE(review): the name `zip` shadows the builtin zip() for the rest of
# the notebook.
zip = zipfile.ZipFile('./data/nba_experiment/NBA_all_features.csv.zip')
zip.extractall('./data/nba_experiment/')

nba_all_features = pd.read_csv('./data/nba_experiment/NBA_all_features.csv')
nba_all_features.head()
# -

# One thing to notice is that this data is noisy. Look at row 2 above; it says a player made a dunk from 33 feet above the baseline--that's beyond the three point line.

# ## <span style="color:red">STUDENT ACTIVITY (20 MINS)</span>
#
# Your task is to train Scikit-Learn's Logistic Regression model on the new NBA data. The data is split into train and test features already.
Your task is to train SciKit-Learn's Logistic Regression model on the *train_features* and *train_class_labels* and then compute the test classification error--which should be around 38%-39%. **BONUS:** If you successfully train the SciKit-Learn model, implement gradient descent or Newton's method.

# +
# split data into train and test
# NOTE(review): train_set_size is reused from the previous dataset's cell;
# DataFrame.ix is deprecated in modern pandas.
train_features = nba_all_features.ix[:train_set_size,:'Center']
test_features = nba_all_features.ix[train_set_size:,:'Center']
train_class_labels = nba_all_features.ix[:train_set_size,['shot_outcome']]
test_class_labels = nba_all_features.ix[train_set_size:,['shot_outcome']]

########## TO DO: TRAIN SCIKIT-LEARN'S LOG. REG. MODEL ##########
# solution: fit on the expanded feature set, then score on the held-out rows
skl_LogReg.fit(train_features, np.ravel(train_class_labels))
predictions = skl_LogReg.predict(test_features)
print "Classification Error on the Test Set: %.2f%%" %(calc_classification_error(predictions, np.array(test_class_labels)) * 100)
#################################################################

# compute the baseline error since the classes are imbalanced
print "Baseline Error: %.2f%%" %(np.sum(test_class_labels)/len(test_class_labels)*100)

# we can't visualize since D>2
# -

# Great! We've improved by a few percentage points. Let's look at which features the model weighted.

# print the learned coefficient for each of the first 11 feature columns
for idx, feature in enumerate(nba_all_features):
    if idx<11:
        print "%s: %.2f" %(feature, skl_LogReg.coef_[0][idx])

# Interestingly, the classifier exploited the location features very little. The position of the player was much more important, especially if he was a center.

# ## 7. Dataset #2: 20 News Groups
#
# For the second experiment, we'll work with the very popular '20 News Groups' dataset consisting of, well, 20 different categories of articles. SciKit-Learn already has it ready for import.
# + from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer # use SciKit Learn's loading methods categories = ['soc.religion.christian', 'alt.atheism'] train_20ng = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'), categories=categories) test_20ng = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'), categories=categories) # transform the text into word counts vectorizer = CountVectorizer(stop_words='english', max_features=1000) train_vectors = vectorizer.fit_transform(train_20ng.data) test_vectors = vectorizer.transform(test_20ng.data) #use the transform fit to the training data train_targets = train_20ng.target test_targets = test_20ng.target print "The training data size is "+str(train_vectors.shape) print "The test data size is "+str(test_vectors.shape) # print the first 500 words of an article print "Example text:" print train_20ng.data[0][:500] print print "Example count vector:" #print train_vectors[0].todense() # - # As you can see, the vector is super sparse and very high dimensional--much different than the data we've been working with previously. Let's see how SciKit-Learn's Logistic Regression model handles it. # + #Train it start_time = time.time() skl_LogReg.fit(train_vectors, train_targets) end_time = time.time() print "Training ended after %.2f seconds." %(end_time-start_time) # compute the classification error on training data predictions = skl_LogReg.predict(test_vectors) print "Classification Error on the Test Set: %.2f%%" %(calc_classification_error(predictions, test_targets) * 100) # compute the baseline error since the classes are imbalanced print "Baseline Error: %.2f%%" %(100 - sum(test_targets)*100./len(test_targets)) # - # 24% error is respectable, but there's still room for improvement. 
In general, working with natural language is one of the hardest application domains in Machine Learning due to the fact that we often have to reduce the abstract, sometimes ambiguous semantic meaning to a superficial token.

# ## <span style="color:red">STUDENT ACTIVITY</span>
#
# In the time remaining in the session, we'd like you to try an open-ended activity to get experience implementing the full prediction pipeline. We've provided some suggestions below, but feel free to improvise.

# ### Suggestion #1: Feature Engineering for 20 News Groups

# Can you beat the baseline error rate on the 20 News Groups dataset? One way to do this is to have better features--word counts are rather blunt. Go read about [TFIDF](http://www.tfidf.com/) and then use SciKit-Learn's [TFIDF Vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) to compute a new feature matrix for the 20 News Groups dataset. You should be able to get an error rate of about 40% if not better. The code is started for you below.

# +
from sklearn.feature_extraction.text import TfidfVectorizer

#### YOUR CODE GOES HERE
# (vectorize the raw text with TfidfVectorizer, retrain skl_LogReg, and
# overwrite `predictions` before the print statements below)

print "Classification Error on the Test Set: %.2f%%" %(calc_classification_error(predictions, test_targets) * 100)

# compute the baseline error since the classes are imbalanced
print "Baseline Error: %.2f%%" %(100 - sum(test_targets)*100./len(test_targets))
# -

# ### Suggestion #2: Step-Size vs. Newton's Method for the NBA dataset

# We saw the benefits of using Newton's method earlier (in section 4), but the dataset was small and artificial. Try implementing Gradient Descent with a step-size and Newton's method for one of the NBA datasets. Then compare their convergence rates by recreating the plot above showing Error vs. Training Iteration.

# ### Suggestion #3: Train Logistic Regression on a new dataset

# UCI's Center for Machine Learning hosts a large repository of datasets. You can find it [here](http://archive.ics.uci.edu/ml/).
The datasets appropriate for classification are [here](http://archive.ics.uci.edu/ml/datasets.html?format=&task=cla&att=&area=&numAtt=&numIns=&type=&sort=nameUp&view=table). Find one you think looks interesting, download it, and run SciKit-Learn's Logistic Regression model on it.

# ### Suggestion #4: Explore SciKit-Learn

# We've barely scratched the surface of SciKit-Learn today. There are many more models to try; explore them [here](http://scikit-learn.org/stable/supervised_learning.html#supervised-learning). Select one, import it, and run it on the datasets above, comparing performance. We suggest trying a [Random Forest](http://scikit-learn.org/stable/modules/ensemble.html#random-forests) or a [Support Vector Machine](http://scikit-learn.org/stable/modules/svm.html).
Session 3 - Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import random # + # Improvement: Make terrain generation procedurally (as in the paper) terrain = { "x_max": 30, "y_max": 30, "tiles": [ [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 6], [6, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 1, 1, 1, 6], [6, 1, 2, 1, 1, 2, 1, 1, 5, 5, 1, 1, 2, 2, 1, 1, 1, 1, 1, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 6, 4, 
4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 6, 4, 4, 4, 2, 2, 1, 1, 1, 2, 1, 1, 1, 6], [6, 1, 1, 1, 1, 5, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 6, 4, 4, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 6], [6, 1, 1, 5, 5, 5, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 6], [6, 1, 1, 5, 5, 5, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 2, 1, 2, 2, 2, 2, 1, 1, 1, 1, 6], [6, 1, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 6], [6, 2, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 4, 4, 4, 1, 2, 1, 1, 1, 1, 1, 1, 1, 6], [6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 1, 2, 1, 1, 1, 1, 1, 1, 6], [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]] } # - # ## Visualization # ### Terrain import matplotlib.pyplot as plt # + from matplotlib import colors # make a color map of fixed colors # ToDo: remake definition of this data to accept easily other values # Grass, Forest, Scrub, Stone, Water, Lava cmap = colors.ListedColormap(['#7c8d4c', '#489030', '#b5ba61', '#7c8485', '#62c1e5', '#ff707e']) bounds=[0,1,2,3,4,5,6] norm = colors.BoundaryNorm(bounds, cmap.N) fig, ax = plt.subplots(figsize=(10,10)) ax = plt.imshow(terrain['tiles'], cmap=cmap) plt.plot() # - # ## Data Definitions # + tiles = { 1:{ "type": "Grass", "passable": True, }, 2:{ "type": "Forest", "passable": True, "next_state": 3 #?? 
}, 3:{ "type": "Scrub", "passable": True }, 4:{ "type": "Stone", "passable": False }, 5:{ "type": "Water", "passable": True }, 6:{ "type": "Lava", "passable": True } } # - # ### **What the subject sees:** # # **Per-tile properties:** # - **Material**: an index corresponding to the tile type # - **nEnts**: The number of occupying entities. # This istechnically learnable from the list of agents, # but thismay not be true for all architectures. # We include it forconvenience here, but may deprecate it in the future # # # **Per-agent properties:** # - **Lifetime**: Number of game ticks alive thus far # - **Health**: Agents die at 0 health (hp) # - **Food**: Agents begin taking damage at 0 food or water # - **Water**: Agents begin taking damage at 0 food or water # - **Position**: Row and column of the agent # - **Position** Deltas: Offsets from the agent to the observer # - **Damage**: Most recent amount of damage taken # - **Same Color**: Whether the agent is the same color (and # thereby is in the same population) as the observer # - **Freeze**: Whether the agent is frozen in place as a result # of having been hit by a mage attack # import collections # + Position = collections.namedtuple('Position', 'x y') class subject(): def __init__(): # Genetics self.dna = None #? # Properties self.lifetime = 0 self.health = 100 self.food = 100 self.water = 100 self.position = self.spawn() # Position(x,y) def spawn(): if random.rand()>0.5: x = random.randint(1, terrain['x_max']-1) y = random.choice([1, terrain['y_max']-1]) else: x = random.choice([1, terrain['x_max']-1]) y = random.randint(1, terrain['y_max']-1) return Position(x,y) def # - class genetic(): def __init__(): self.specie = specie self.capacity = capacity self.population = None # Initialized when run self.epochs = epochs def _genesis(): self.population = [self.specie() for _ in range(self.capacity)] def run(): # 1 Initialize population self.population = self._genesis() for epoch in self.epochs:
Genetic.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Revisión de código para generación de índices
#
# **Fecha:** 7 de abril de 2020
#
# **Responsable de revisión:** <NAME>
#
# **Código revisado**

# **1. Sobre la documentación del código/de la función**
#
# ¿Se encuentran presentes en la implementación los siguientes elementos? Por favor, ingrese explicaciones detalladas.
#
# **a) Descripción concisa y breve de lo que hace el código/la función**
#
# [Pendiente: desarrollo]
#
# **b) Descripción de sus argumentos de entrada, su significado y rango de valores que pueden tomar**
#
# [Pendiente: desarrollo]
#
# **c) Descripción de los tipos de argumentos de entrada y de salida (por ejemplo, valores enteros, reales, strings, dataframes, matrices, etc.)**
#
# [Pendiente: desarrollo]
#
# **d) Descripción de la salida de la función, su significado y valores/objetos que debe regresar**
#
# [Pendiente: desarrollo]

# **2. Cumplimiento de objetivos del código/de la función**
#
# Por favor, ingrese explicaciones detalladas.
#
# **a) ¿El código cumple los objetivos para los que fue diseñado?**
#
# [Pendiente: desarrollo]
#
# **b) ¿La salida de la función genera una lista de tamaño n(n+1)/2?**
#
# [Pendiente: desarrollo]
#
# **c) ¿La salida de la función genera una lista de elementos (tuplas) cuyos elementos son accesibles en coordenadas (x, y)?**
#
# [Pendiente: desarrollo]

# **3. Pruebas**
#
# Ocupe la presente sección para diseñar pruebas variando los parámetros que recibe el código/la función en diferentes rangos, para evaluar su comportamiento y/o detectar posibles fallos.
#
# **Test 1**
#
# **Objetivo del test:** [Pendiente: desarrollo]
#
# **Implementación del test:**

# Principales hallazgos del test:
#
# * Hallazgo 1,
# * Hallazgo 2.
# # **Test 2**
#
# **Objetivo del test:** [Pendiente: desarrollo]
#
# **Implementación del test:**

# **4. Resumen detallado de posibles puntos faltantes en implementación**
#
# * [Pendiente: desarrollo]
# * [Pendiente: desarrollo]
# * [Pendiente: desarrollo]
#
#
# **Sugerencias para resolver los puntos anteriores**
#
# * [Pendiente: desarrollo]
# * [Pendiente: desarrollo]
# * [Pendiente: desarrollo]
#
test/.ipynb_checkpoints/Rev_GeneracionIndices-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Day 1: Of Numerical Integration and Python # # Welcome to Day 1! Today, we start with our discussion of what Numerical Integration is. # ### What is Numerical Integration? # # From the point of view of a theoretician, the ideal form of the solution to a differential equation given the initial conditions, i.e. an initial value problem (IVP), would be a formula for the solution function. But sometimes obtaining a formulaic solution is not always easy, and in many cases is absolutely impossible. So, what do we do when faced with a differential equation that we cannot solve? If you are only looking for long term behavior of a solution you can always sketch a direction field. This can be done without too much difficulty for some fairly complex differential equations that we canโ€™t solve to get exact solutions. But, what if we need to determine how a specific solution behaves, including some values that the solution will take? In that case, we have to rely on numerical methods for solving the IVP such as euler's method or the Runge-Kutta Methods. # # #### Euler's Method for Numerical Integration # # We use Euler's Method to generate a numerical solution to an initial value problem of the form: # # $$\frac{dx}{dt} = f(x, t)$$ # $$x(t_o) = x_o$$ # # Firstly, we decide the interval over which we desire to find the solution, starting at the initial condition. We break this interval into small subdivisions of a fixed length $\epsilon$. Then, using the initial condition as our starting point, we generate the rest of the solution by using the iterative formulas: # # $$t_{n+1} = t_n + \epsilon$$ # $$x_{n+1} = x_n + \epsilon f(x_n, t_n)$$ # # to find the coordinates of the points in our numerical solution. 
We end this process once we have reached the end of the desired interval.
#
# The best way to understand how it works is from the following diagram:
#
# <img src="euler.png" alt="euler.png" width="400"/>
#
# #### Euler's Method in Python

# Let $\frac{dx}{dt}=f(x,t)$, we want to find $x(t)$ over $t\in[0,2)$, given that $x(0)=1$ and $f(x,t) = 5x$. The exact solution of this equation would be $x(t) = e^{5t}$.

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

def f(x,t): # define the function f(x,t), the right-hand side of dx/dt = f(x,t)
    return 5*x

epsilon = 0.01 # define timestep
t = np.arange(0,2,epsilon) # define an array for t
x = np.zeros(t.shape) # define an array for x
x[0]= 1 # set initial condition

# march forward in time: x_{n+1} = x_n + epsilon * f(x_n, t_n)
for i in range(1,t.shape[0]):
    x[i] = epsilon*f(x[i-1],t[i-1])+x[i-1] # Euler Integration Step

# plot every 5th numerical point against the analytic solution e^{5t}
plt.plot(t[::5],x[::5],".",label="Eulers Solution")
plt.plot(t,np.exp(5*t),label="Exact Solution")
plt.xlabel("t")
plt.ylabel("x")
plt.legend()
plt.show()
# -

# #### Euler and Vectors
#
# Euler's Method also applies to vectors and can solve simultaneous differential equations.
#
# The Initial Value problem now becomes:
#
# $$\frac{d\vec{X}}{dt} = \vec{f}(\vec{X}, t)$$
# $$\vec{X}(t_o) = \vec{X_o}$$
#
# where $\vec{X}=[X_1,X_2...]$ and $\vec{f}(\vec{X}, t)=[f_1(\vec{X}, t),f_2(\vec{X}, t)...]$.
#
# The Euler's Method becomes:
#
# $$t_{n+1} = t_n + \epsilon$$
# $$\vec{X_{n+1}} = \vec{X_n} + \epsilon \vec{f}(\vec{X_n}, t_n)$$
#
# Let $\frac{d\vec{X}}{dt}=f(\vec{X},t)$, we want to find $\vec{X}(t)$ over $t\in[0,2)$, given that $\vec{X}(t)=[x,y]$, $\vec{X}(0)=[1,0]$ and $f(\vec{X},t) = [x-y,y-x]$.
# + def f(X,t): # define the function f(x,t) x,y = X return np.array([x-y,y-x]) epsilon = 0.01 # define timestep t = np.arange(0,2,epsilon) # define an array for t X = np.zeros((2,t.shape[0])) # define an array for x X[:,0]= [1,0] # set initial condition for i in range(1,t.shape[0]): X[:,i] = epsilon*f(X[:,i-1],t[i-1])+X[:,i-1] # Euler Integration Step plt.plot(t[::5],X[0,::5],".",label="Eulers Solution for x") plt.plot(t[::5],X[1,::5],".",label="Eulers Solution for y") plt.xlabel("t") plt.ylabel("x") plt.legend() plt.show() # - # #### A Generalized function for Euler Integration # # Now, we create a generalized function that takes in 3 inputs ie. the function $\vec{f}(\vec{y},t)$ when $\frac{d\vec{y}}{dt}=f(\vec{y},t)$, the time array, and initial vector $\vec{y_0}$. # # ##### Algorithm # - Get the required inputs: function $\vec{f}(\vec{y},t)$, initial condition vector $\vec{y_0}$ and time series $t$. Entering a time series $t$ allows for greater control over $\epsilon$ as it can now vary for each timestep. The only difference in the Euler's Method is now : $\epsilon\rightarrow\epsilon(t_n)$. # - Check if the input is of the correct datatype ie. floating point decimal. # - Create a zero matrix to hold the output. # - For each timestep, perform the euler method updation with variable $\epsilon$ and store it in the output matrix. # - Return the output timeseries matrix. 
# +
def check_type(y,t): # Ensure Input is Correct
    # Use np.issubdtype: it is the supported way to test for a floating-point
    # dtype.  The previous `dtype == np.floating` comparison relied on
    # deprecated abstract-type coercion and fails on recent NumPy releases.
    return np.issubdtype(y.dtype, np.floating) and np.issubdtype(t.dtype, np.floating)

class _Integrator():
    """Forward-Euler integrator: y_{n+1} = y_n + dt_n * func(y_n, t_n)."""

    def integrate(self,func,y0,t):
        # per-step sizes; this allows a non-uniform time grid
        time_delta_grid = t[1:] - t[:-1]
        y = np.zeros((y0.shape[0],t.shape[0]))
        y[:,0] = y0
        for i in range(time_delta_grid.shape[0]):
            y[:,i+1]= time_delta_grid[i]*func(y[:,i],t[i])+y[:,i] # Euler Integration Step
        return y

def odeint_euler(func,y0,t):
    """Integrate dy/dt = func(y, t) from y(t[0]) = y0 over the grid t.

    func: callable returning the derivative vector; y0: initial state;
    t: 1-D array of (float) time points.  Returns the len(y0) x len(t)
    solution matrix, or prints an error for non-float inputs.
    """
    y0 = np.array(y0)
    t = np.array(t)
    if check_type(y0,t):
        return _Integrator().integrate(func,y0,t)
    else:
        print("error encountered")

# +
# f and t are defined in the previous cells
solution = odeint_euler(f,[1.,0.],t)

plt.plot(t[::5],solution[0,::5],".",label="Eulers Solution for x")
plt.plot(t[::5],solution[1,::5],".",label="Eulers Solution for y")
plt.xlabel("t")
plt.ylabel("X")
plt.legend()
plt.show()
# -
By further rewriting the equation, we can cancel higher order error terms and reach the most commonly used fourth-order Runge-Kutta Methods or RK4 method, which is described below: # # $$k_1=f(x_n,t_n)$$ # # $$k_2=f(x_n+\epsilon\frac{k_1}{2},t_n+\frac{\epsilon}{2})$$ # # $$k_3=f(x_n+\epsilon\frac{k_2}{2},t_n+\frac{\epsilon}{2})$$ # # $$k_4=f(x_n+\epsilon k_3,t_n+\epsilon)$$ # # $$y_{n+1}=y_n+\frac{\epsilon}{6}(k_1+2 k_2+2 k_3+k_4)+O(\epsilon^5)$$ # # Note that this numerical method is again easily converted to a vector algorithm by simply replacing $x_i$ by the vector $\vec{X_i}$. # # This method is what we will use to simulate our networks. # # # #### Generalized RK4 Method in Python # # Just like we had created a function for Euler Integration in Python, we create a generalized function for RK4 that takes in 3 inputs ie. the function $f(\vec{y},t)$ when $\frac{d\vec{y}}{dt}=f(\vec{y},t)$, the time array, and initial vector $\vec{y_0}$. We then perform the exact same integration that we had done with Euler's Method. Everything remains the same except we replace the Euler's method updation rule with the RK4 update rule. 
#
#

# +
def check_type(y,t): # Ensure Input is Correct
    # Use np.issubdtype: comparing a dtype to the abstract np.floating with
    # `==` relied on deprecated coercion and breaks on newer NumPy releases.
    return np.issubdtype(y.dtype, np.floating) and np.issubdtype(t.dtype, np.floating)

class _Integrator():
    """Classic fourth-order Runge-Kutta (RK4) integrator."""

    def integrate(self,func,y0,t):
        time_delta_grid = t[1:] - t[:-1]  # per-step sizes (grid may be non-uniform)
        y = np.zeros((y0.shape[0],t.shape[0]))
        y[:,0] = y0
        for i in range(time_delta_grid.shape[0]):
            k1 = func(y[:,i], t[i]) # RK4 Integration Steps
            half_step = t[i] + time_delta_grid[i] / 2
            k2 = func(y[:,i] + time_delta_grid[i] * k1 / 2, half_step)
            k3 = func(y[:,i] + time_delta_grid[i] * k2 / 2, half_step)
            # BUG FIX: k4 must be evaluated at the scalar end-time of the
            # step, t[i] + dt; the original passed the whole array `t`,
            # which only went unnoticed because the demo func ignores time.
            k4 = func(y[:,i] + time_delta_grid[i] * k3, t[i] + time_delta_grid[i])
            y[:,i+1]= (k1 + 2 * k2 + 2 * k3 + k4) * (time_delta_grid[i] / 6) + y[:,i]
        return y

def odeint_rk4(func,y0,t):
    """Integrate dy/dt = func(y, t) from y(t[0]) = y0 over the grid t via RK4.

    func: callable returning the derivative vector; y0: initial state;
    t: 1-D array of (float) time points.  Returns the len(y0) x len(t)
    solution matrix, or prints an error for non-float inputs.
    """
    y0 = np.array(y0)
    t = np.array(t)
    if check_type(y0,t):
        return _Integrator().integrate(func,y0,t)
    else:
        print("error encountered")

# f and t are defined in the previous cells
solution = odeint_rk4(f,[1.,0.],t)

plt.plot(t[::5],solution[0,::5],".",label="RK4 Solution for x")
plt.plot(t[::5],solution[1,::5],".",label="RK4 Solution for y")
plt.xlabel("t")
plt.ylabel("X")
plt.legend()
plt.show()
# -

# As an **Exercise**, try to solve the equation of a simple pendulum and observe its dynamics using Euler Method and RK4 methods. The equation of motion of a simple pendulum is given by: $$\frac{d^2s}{dt^2}=L\frac{d^2\theta}{dt^2}=-g\sin{\theta}$$ where $L$ = Length of String and $\theta$ = angle made with vertical. To solve this second order differential equation you may use a dummy variable $\omega$ representing angular velocity such that:
# $$\frac{d\theta}{dt}=\omega$$
# $$\frac{d\omega}{dt}=-\frac{g}{L}\sin{\theta}$$
tutorial/Notebooks/day 1 _ Numerical Integration_ Euler Method/day 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ieee # language: python # name: ieee # --- # + import neptune ses = neptune.sessions.Session() project = ses.get_project('jakub-czakon/ieee-fraud-detection') # - BEST_EXP_ID = 'IEEEF-76' best_exp = project.get_experiments(id=BEST_EXP_ID)[0] best_exp best_exp.get_properties() best_exp.get_parameters()
notebooks/2.0-results-exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # default_exp export # - # # export: nb2py # # > Code that allows you to export a notebook (.ipynb) as a python script( .py) to a target folder. # nb2py will allow you to convert the notebook (.ipynb) where the function is executed to a python script. # # The conversion applies these rules: # # * The notebook will be automatically saved when the function is executed. # * Only code cells will be converted (not markdown cells). # * A header will be added to indicate the script has been automatically generated. It also indicates where the original ipynb is. # * Cells with a #hide flag won't be converted. Flag variants like # hide, #Hide, #HIDE, ... are also acceptable. # * Empty cells and unnecessary empty lines within cells will be removed. # * By default the script will be created with the same name and in the same folder of the original notebook. But you can pass a dir folder and a different name if you wish. # * If a script with the same name already exists, it will be overwriten. #export import runpy import re import requests from urllib.parse import unquote import shutil from fastcore.script import Param,call_parse, store_false, store_true import nbformat from tsai.imports import os, time, is_colab, maybe_mount_gdrive, save_nb, to_local_time # This code is required to identify flags in the notebook. We are looking for #hide flags. 
# +
#export
"""
Code in this cell is based on the nbdev library:
https://github.com/fastai/nbdev/blob/master/nbdev/export.py
"""

def _mk_flag_re(body, n_params, comment):
    "Compiles a regex for finding nbdev flags"
    assert body!=True, 'magics no longer supported'
    # Map the expected parameter count onto the capturing group used to parse
    # the flag's parameters; anything else gets no parameter group at all.
    param_groups = {
        -1: r"[ \t]+(.+)",             # one or more parameters, captured together
        1: r"[ \t]+(\S+)",             # exactly one parameter
        (0, 1): r"(?:[ \t]+(\S+))?",   # a single optional parameter
    }
    param_group = param_groups.get(n_params, "")
    prefix = r"\s*\#\s*"
    return re.compile(rf"""
# {comment}:
^            # beginning of line (since re.MULTILINE is passed)
{prefix}
{body}
{param_group}
[ \t]*       # any number of spaces and/or tabs
$            # end of line (since re.MULTILINE is passed)
""", re.MULTILINE | re.VERBOSE)

_re_hide = _mk_flag_re("hide?", 0, "Matches any line with #hide without any module name")

def _get_unhidden_cells(cells):
    "Indices of the non-empty code cells that do not carry a #hide flag."
    return [idx for idx, cell in enumerate(cells)
            if cell['cell_type'] == 'code'
            and cell['source'] != ''
            and not _re_hide.findall(cell['source'].lower())]
# -

# This code automatically gets the name of the notebook. It's been tested to work on Jupyter notebooks, Jupyter Lab and Google Colab.

# +
#export
"""Code in this cell is a modified version of this repo:
https://github.com/msm1089/ipynbname
# Copyright (c) 2020 <NAME>.
"""
# MIT License

# Copyright (c) 2020 <NAME>

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import json
import urllib.error
import urllib.request
from itertools import chain
from pathlib import Path, PurePath
from typing import Generator, Tuple, Union

import ipykernel
from jupyter_core.paths import jupyter_runtime_dir
from traitlets.config import MultipleInstanceError

# _FILE_ERROR is not referenced in this cell; kept for parity with upstream ipynbname.
_FILE_ERROR = "Can't identify the notebook {}."
_CONN_ERROR = "Unable to access server;\n" \
            + "ipynbname requires either no security or token based security."


def _list_maybe_running_servers(runtime_dir=None) -> Generator[dict, None, None]:
    """ Iterate over the server info files of running notebook servers.

    "Maybe running": stale info files of dead servers may still be present
    in the runtime directory, so callers must tolerate unreachable entries.
    """
    if runtime_dir is None:
        runtime_dir = jupyter_runtime_dir()
    runtime_dir = Path(runtime_dir)
    if runtime_dir.is_dir():
        for file_name in chain(
            runtime_dir.glob('nbserver-*.json'),  # jupyter notebook (or lab 2)
            runtime_dir.glob('jpserver-*.json'),  # jupyterlab 3
        ):
            yield json.loads(file_name.read_bytes())


def _get_kernel_id() -> str:
    """ Returns the kernel ID of the ipykernel.
    """
    # Connection files are named like "kernel-<id>.json"; split once on '-'.
    connection_file = Path(ipykernel.get_connection_file()).stem
    kernel_id = connection_file.split('-', 1)[1]
    return kernel_id


def _get_sessions(srv):
    """ Given a server, returns its sessions, or raises URLError if access
    is denied.
    NOTE: Works only when either there is no security or there is token
    based security.
    """
    try:
        qry_str = ""
        token = srv['token']
        if token:
            qry_str = f"?token={token}"
        url = f"{srv['url']}api/sessions{qry_str}"
        with urllib.request.urlopen(url) as req:
            return json.load(req)
    except Exception:
        # BUGFIX: this previously raised urllib.error.HTTPError(_CONN_ERROR).
        # HTTPError's constructor requires (url, code, msg, hdrs, fp), so the
        # one-argument call raised a confusing TypeError instead of the
        # intended error. URLError takes a single `reason` argument and is
        # caught by the same `except Exception` handlers in the callers.
        raise urllib.error.URLError(_CONN_ERROR)


def _find_nb() -> Union[Tuple[dict, PurePath], Tuple[None, None]]:
    """ Locate the (server info, notebook path) pair of the running kernel,
    or (None, None) when it cannot be determined.
    """
    try:
        kernel_id = _get_kernel_id()
    except (MultipleInstanceError, RuntimeError):
        return None, None  # Could not determine
    for srv in _list_maybe_running_servers():
        try:
            sessions = _get_sessions(srv)
            for sess in sessions:
                if sess['kernel']['id'] == kernel_id:
                    return srv, PurePath(sess['notebook']['path'])
        except Exception:
            pass  # There may be stale entries in the runtime directory
    return None, None


def get_nb_name() -> str:
    """ Returns the file name of the notebook (Path.name keeps the .ipynb
    extension), or None if it cannot be determined.
    """
    try:
        _, path = _find_nb()
        if path:
            return path.name
        else:
            return
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        return


def get_colab_nb_name():
    """ Returns the notebook name (or github URL) when running on Google Colab.
    """
    # The Colab kernel proxy exposes the session list on a fixed internal IP.
    # `requests` and `unquote` come from this file's top-level imports.
    d = requests.get('http://172.28.0.2:9000/api/sessions').json()[0]
    fname = unquote(d['name'])
    fid = unquote(d['path'].split('=')[1])
    if 'https://github.com' in fid:
        # Notebook opened straight from GitHub: use the URL itself.
        fname = fid
    else:
        # Otherwise assume the default Colab location in Google Drive.
        fname = Path(f'drive/MyDrive/Colab Notebooks/{fname}')
    return fname


def get_nb_path() -> Path:
    """ Returns the absolute path of the notebook, or None if it cannot
    be determined.
    """
    try:
        if is_colab():
            return get_colab_nb_name()
        else:
            srv, path = _find_nb()
            if srv and path:
                # jupyterlab 3 stores 'root_dir'; older servers store 'notebook_dir'
                root_dir = Path(srv.get('root_dir') or srv['notebook_dir'])
                return root_dir / path
            else:
                return
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        return


def nb_name_to_py(nb_name):
    """ Maps a notebook file name to the corresponding python script name. """
    return str(nb_name).replace(".ipynb", ".py")


def get_script_path(nb_name=None):
    """ Returns the .py path for `nb_name`, defaulting to the current notebook. """
    if nb_name is None:
        nb_name = get_nb_path()
    return nb_name_to_py(nb_name)
# -

# This code is used when trying to save a file to google drive. We first need to mount the drive.
#export
@call_parse
def nb2py(nb: Param("absolute or relative full path to the notebook you want to convert to a python script", str)=None,
          folder: Param("absolute or relative path to folder of the script you will create. Defaults to current nb's directory", str)=None,
          name: Param("name of the script you want to create. Defaults to current nb name .ipynb by .py", str)=None,
          save: Param("saves the nb before converting it to a script", store_false)=True,
          run: Param("import and run the script", store_true)=False,
          verbose: Param("controls verbosity", store_false)=True,
          ):
    "Converts a notebook to a python script in a predefined folder."

    # make sure drive is mounted when using Colab
    if is_colab(): maybe_mount_gdrive()

    # nb path & name: either the explicit `nb` argument or the notebook this
    # function is being executed from.
    if nb is not None:
        nb_path = Path(nb)
        # normalize: accept paths given with or without the .ipynb extension
        nb_path = nb_path.parent/f"{nb_path.stem}.ipynb"
    else:
        try:
            nb_path = get_nb_path()
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
            print("nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.")
            return
    if nb_path is None:
        print("nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.")
        return
    nb_name = nb_path.name
    assert os.path.isfile(nb_path), f"nb2py couldn't find {nb_path}. Please, confirm the path is correct."

    # save nb: only those that are run from the notebook itself
    if save and not is_colab() and nb is None:
        try:
            save_nb(nb_name)
        except Exception:
            # BUGFIX: message previously read "It will used last saved at ..."
            print(f"nb2py couldn't save the nb automatically. It will use the version last saved at {to_local_time(os.path.getmtime(nb_name))}")

    # script path & name
    if folder is not None: folder = Path(folder)
    else: folder = nb_path.parent
    if name is not None: name = f"{Path(name).stem}.py"
    else: name = f"{nb_path.stem}.py"
    script_path = folder/name

    # delete file if exists and create script_path folder if doesn't exist
    if os.path.exists(script_path): os.remove(script_path)
    script_path.parent.mkdir(parents=True, exist_ok=True)

    # Write script header
    with open(script_path, 'w') as f:
        f.write(f'# -*- coding: utf-8 -*-\n')
        f.write(f'"""{nb_name}\n\n')
        f.write(f'Automatically generated.\n\n')
        if nb_path is not None:
            f.write(f'Original file is located at:\n')
            f.write(f'    {nb_path}\n')
        f.write(f'"""')

    # read the notebook, retrying while the auto-save above completes.
    # NOTE(review): if all 10 attempts fail, `nb` keeps its argument value and
    # the nb['cells'] access below raises an unrelated error — confirm whether
    # an explicit failure message is wanted here.
    for i in range(10):
        try:
            with open(Path(nb_path), 'r', encoding='utf8') as f:
                nb = nbformat.reads(f.read(), as_version=4)
            break
        except Exception:
            time.sleep(.5)

    # identify convertible cells (excluding empty cells and those with hide flags)
    idxs = _get_unhidden_cells(nb['cells'])
    pnb = nbformat.from_dict(nb)
    pnb['cells'] = [pnb['cells'][i] for i in idxs]

    # clean up cells and write script
    sep = '\n' * 2
    for i, cell in enumerate(pnb['cells']):
        source_str = cell['source'].replace('\r', '')
        code_lines = source_str.split('\n')
        if code_lines == ['']: continue
        # strip leading/trailing blank lines of the cell
        while code_lines[0] == '': code_lines = code_lines[1:]
        while code_lines[-1] == '': code_lines = code_lines[:-1]
        cl = []
        for j in range(len(code_lines)):
            # blank out whitespace-only lines
            if list(set(code_lines[j].split(" "))) == ['']: code_lines[j] = ''
            # collapse runs of blank lines (the first cell is kept verbatim).
            # NOTE(review): `i == 0` exempts the whole first CELL from the
            # collapse — confirm whether `j == 0` (first line) was intended.
            if i == 0 or code_lines[j-1] != '' or code_lines[j] != '': cl.append(code_lines[j])
        code_lines = cl
        code = sep + '\n'.join(code_lines)
        with open(script_path, 'a', encoding='utf8') as f:
            f.write(code)

    # check script exists
    assert os.path.isfile(script_path), f"an error occurred during the export and {script_path} doesn't exist"
    if verbose: print(f"{nb_name} converted to {script_path}")
    if run: runpy.run_path(script_path)
    return str(script_path)


# Smoke tests: convert this notebook through the different argument
# combinations and delete the generated script afterwards.
if not is_colab():
    nb = None
    folder = None
    name = None
    pyname = nb2py(nb=nb, folder=folder, name=name)
    if pyname is not None:
        assert os.path.isfile(pyname)
        os.remove(pyname)
        assert not os.path.isfile(pyname)

nb = '000_export.ipynb'
folder = None
name = None
pyname = nb2py(nb=nb, folder=folder, name=name)
if pyname is not None:
    assert os.path.isfile(pyname)
    os.remove(pyname)
    assert not os.path.isfile(pyname)

nb = '../nbs/000_export'
folder = None
name = None
pyname = nb2py(nb=nb, folder=folder, name=name)
if pyname is not None:
    assert os.path.isfile(pyname)
    os.remove(pyname)
    assert not os.path.isfile(pyname)

nb = None
folder = '../test_export/'
name = None
pyname = nb2py(nb=nb, folder=folder, name=name)
if pyname is not None:
    assert os.path.isfile(pyname)
    shutil.rmtree(folder)
    assert not os.path.isfile(pyname)

#hide
from tsai.imports import *
from tsai.export import *
nb_name = get_nb_name()
# nb_name = '000_export.ipynb'
create_scripts(nb_name);
nbs/000_export.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## Expression Tutorial # # This tutorial covers data representation with Hail's expression classes. We will go over Hail's data types and the expressions that represent them, as well as a few features of expressions, such as lazy evaluation and missingness. We will also cover how expressions can refer to fields in a table or matrix table. # # As you are working through the tutorial, you can also check out the [expression API](https://hail.is/docs/0.2/expressions.html#expressions) for documentation on specific expressions and their methods, or the [expression](https://hail.is/docs/0.2/overview/expressions.html) page in the overview for more information on expressions. # # Start by importing the Hail module, which we typically abbreviate as `hl`, and initializing Hail and Spark with the [init](https://hail.is/docs/0.2/api.html#hail.init) method: # + slideshow={"slide_type": "slide"} import hail as hl hl.init() # - # ### Hail's Data Types # # Each object in Python has a data type, which can be accessed with Python's `type` method. Here is a Python string, which has type `str`. type("Python") # Hail has its own data types for representing data. Here is a Hail string, which we construct with the [str](https://hail.is/docs/0.2/functions/core.html?highlight=str#hail.expr.functions.str) method. We can access the string's Hail type with the `dtype` field. hl.str("Hail").dtype # Hail has primitive and container types, as well as a few types specific to the field of genetics. 
# # * primitive types: [int32](https://hail.is/docs/0.2/types.html#hail.expr.types.tint32), [int64](https://hail.is/docs/0.2/types.html#hail.expr.types.tint64), [float32](https://hail.is/docs/0.2/types.html#hail.expr.types.tfloat32), [float64](https://hail.is/docs/0.2/types.html#hail.expr.types.tfloat64), [bool](https://hail.is/docs/0.2/types.html#hail.expr.types.tbool), [str](https://hail.is/docs/0.2/types.html#hail.expr.types.tstr) # * container types: [arrays](https://hail.is/docs/0.2/types.html#hail.expr.types.tarray), [sets](https://hail.is/docs/0.2/types.html#hail.expr.types.tset), [dicts](https://hail.is/docs/0.2/types.html#hail.expr.types.tdict), [tuples](https://hail.is/docs/0.2/types.html#hail.expr.types.ttuple), [structs](https://hail.is/docs/0.2/types.html#hail.expr.types.tstruct), [intervals](https://hail.is/docs/0.2/types.html#hail.expr.types.tinterval) # * genetics types: [locus](https://hail.is/docs/0.2/types.html#hail.expr.types.tlocus), [call](https://hail.is/docs/0.2/types.html#hail.expr.types.tcall) # # Each of these types has its own constructor method, which returns an expression: hl.str("Hail") # + [markdown] slideshow={"slide_type": "slide"} # ### What is an Expression? # # Data types in Hail are represented by [expression](https://hail.is/docs/0.2/expressions.html#expressions) classes. Each data type has its own expression class. For example, an integer of type `tint32` is represented by an `Int32Expression`. # # We can construct an integer expression in Hail with the [int32](https://hail.is/docs/0.2/functions/constructors.html?highlight=int32#hail.expr.functions.int32) function. # - hl.int32(3) # To automatically impute the type when converting a Python object to a Hail expression, use the [literal](https://hail.is/docs/0.2/functions/core.html?highlight=literal#hail.expr.functions.literal) method. Let's try it out on a Python list. hl.literal(['a', 'b', 'c']) # The Python list is converted to an ArrayExpression of type `array<str>`. 
In other words, an array of strings. # ### Expressions are Lazy # # In languages like Python and R, expressions are evaluated and stored immediately. This is called **eager** evalutation. # + slideshow={"slide_type": "fragment"} 1 + 2 # + [markdown] slideshow={"slide_type": "slide"} # Eager evaluation won't work on datasets that won't fit in memory. Consider the UK Biobank BGEN file, which is ~2TB but decompresses to >100TB in memory. # # In order to process datasets of this size, Hail uses **lazy** evaluation. When you enter an expression, Hail doesn't execute the expression immediately; it only records what you asked to do. # + slideshow={"slide_type": "fragment"} one = hl.int32(1) three = one + 2 three # + [markdown] slideshow={"slide_type": "slide"} # Hail evaluates an expression only when it must. For example: # # - when performing an aggregation # - when calling the methods [take](https://hail.is/docs/0.2/expressions.html?highlight=take#hail.expr.expressions.Expression.take), [collect](https://hail.is/docs/0.2/expressions.html?highlight=take#hail.expr.expressions.Expression.collect), and [show](https://hail.is/docs/0.2/expressions.html?highlight=take#hail.expr.expressions.Expression.show) # - when exporting or writing to disk # # Hail evaluates expressions by streaming to accomodate very large datasets. # - # If you want to force the evaluation of an expression, you can do so by [evaluating it](https://hail.is/docs/0.2/expressions.html?highlight=take#hail.expr.expressions.Expression.eval). Note that this can only be done on an expression with no index, such as `hl.int32(1) + 2`. If the expression has an index, e.g. `table.idx + 1`, # then the `eval` method will fail. The section on indices below explains this concept further. hl.eval(three) # The [show](https://hail.is/docs/0.2/hail.Table.html?highlight=show#hail.Table.show) method can also be used to evaluate and display the expression. 
# + slideshow={"slide_type": "fragment"} three.show() # - # ### Missing data # # All expressions in Hail can represent missing data. Hail has a [collection of primitive operations](https://hail.is/docs/0.2/functions/core.html) for dealing with missingness. # # The [null](https://hail.is/docs/0.2/functions/core.html?highlight=null#hail.expr.functions.null) constructor can be used to create a missing expression of a specific type, such as a missing string: missing_string = hl.null(hl.tstr) # Use [is_defined](https://hail.is/docs/0.2/functions/core.html?highlight=is_defined#hail.expr.functions.is_defined) or [is_missing](https://hail.is/docs/0.2/functions/core.html?highlight=is_defined#hail.expr.functions.is_missing) to test an expression for missingness. hl.eval(hl.is_defined(missing_string)) hl.eval(hl.is_missing(missing_string)) # Expressions handle missingness in the following ways: # # * a missing value plus another value is always missing # * a conditional statement with a missing predicate is missing # * when aggregating a sum of values, the missing values are ignored # # This is different from Python's treatment of missingness, where `None + 5` would produce an error. In Hail, `hl.null(hl.tint32) + 5` produces a missing result, not an error. hl.eval(hl.is_missing(hl.null(hl.tint32) + 5)) # Here are a few more examples to illustrate how missingness is treated in Hail: # # Missingness is ignored in a summation: hl.eval(hl.sum(hl.array([1, 2, hl.null(hl.tint32)]))) # [or_missing](https://hail.is/docs/0.2/functions/core.html?highlight=is_defined#hail.expr.functions.or_missing) takes a predicate and a value. If the predicate is True, it returns the value; otherwise, it returns a missing value. x = hl.int32(5) hl.eval(hl.or_missing(x>0, x)) print(hl.eval(hl.or_missing(x>10, x))) # + [markdown] slideshow={"slide_type": "slide"} # ### Indices # # Expressions carry another piece of information: indices. 
Indices record the `Table` or `MatrixTable` to which the expression refers, and the axes over which the expression can vary. # # Let's see some examples from the 1000 genomes dataset: # - hl.utils.get_1kg('data/') # + slideshow={"slide_type": "fragment"} mt = hl.read_matrix_table('data/1kg.mt') mt # + [markdown] slideshow={"slide_type": "slide"} # Let's add a global field. # + slideshow={"slide_type": "fragment"} mt = mt.annotate_globals(dataset = '1kg') # + [markdown] slideshow={"slide_type": "slide"} # We can examine any field of the matrix table with the [describe](https://hail.is/docs/0.2/expressions.html?highlight=describe#hail.expr.expressions.Expression.describe) method. If we examine the field we just added, notice that it has no indices, because it is a global field. # + slideshow={"slide_type": "slide"} mt.dataset.describe() # - # The `locus` field is a row field, so it will be indexed by `row`. # + slideshow={"slide_type": "slide"} mt.locus.describe() # - # Likewise, a column field `s` will be indexed by `column`. # + slideshow={"slide_type": "slide"} mt.s.describe() # - # And finally, an entry field `GT` will be indexed by both the `row` and `column`. # + slideshow={"slide_type": "slide"} mt.GT.describe() # + [markdown] slideshow={"slide_type": "slide"} # Expressions like `locus`, `s`, and `GT` above do not have a single value, but rather a value that varies across rows or columns of `mt`. Therefore, calling the `hl.eval` function with these expressions will lead to an error. # # Global fields don't vary across rows or columns, so they can be directly evaluated: # - hl.eval(mt.dataset) # ### `show`, `take`, and `collect` # # Although expressions with indices do not have a single realizable value (calling `hl.eval` will fail), you can use `show` to print the first few values, or `take` and `collect` to localize all values into a Python list. # # `show` and `take` grab the first 10 rows by default, but you can specify a number of rows to grab. 
mt.s.show() mt.s.take(5) # You can [collect](https://hail.is/docs/0.2/expressions.html?highlight=collect#hail.expr.expressions.Expression.collect) an expression to localize all values, like getting a list of all sample IDs of a dataset. # # But be careful -- don't `collect` more data than can fit in memory! all_sample_ids = mt.s.collect() all_sample_ids[:5] # + [markdown] slideshow={"slide_type": "slide"} # ### Learning more # # Hail has a suite of of [functions](https://hail.is/docs/0.2/functions/index.html) to transform and build expressions. # # For further documentation on expressions, see the [expression API](https://hail.is/docs/0.2/expressions.html) and the [expression](https://hail.is/docs/0.2/overview/expressions.html) page.
hail/python/hail/docs/tutorials/03-expressions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Underfitting and Overfitting demo using KNN

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Pre-cleaned Titanic dataset; target column is 'Survived'.
data = pd.read_csv('data_knn_classification_cleaned_titanic.csv')
data.head()

x = data.drop(['Survived'], axis=1)
y = data['Survived']

#Scaling the data (KNN is distance based, so features must be on one scale)
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
x = ss.fit_transform(x)

#split the data (stratified so both splits keep the class balance)
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, random_state=96, stratify=y)

# # implementing KNN

#importing KNN classifier and f1 score
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import f1_score

# +
#creating an instance of KNN with a fixed neighbour count
clf = KNN(n_neighbors = 12)
clf.fit(train_x, train_y)

# NOTE(review): arguments are passed as (y_pred, y_true); sklearn's signature
# is f1_score(y_true, y_pred). Binary F1 is symmetric under that swap, so the
# value is unchanged, but consider swapping for clarity — confirm.
train_predict = clf.predict(train_x)
k1 = f1_score(train_predict, train_y)
print("training: ",k1)

test_predict = clf.predict(test_x)
k = f1_score(test_predict, test_y)
print("testing: ",k)
# -

def f1score(k):
    """For each neighbour count in `k`, fit a KNN classifier and record the
    train and test F1 scores.

    Uses the module-level train_x/train_y/test_x/test_y splits.
    Returns two parallel lists: (train_f1, test_f1).
    """
    train_f1 = []
    test_f1 = []
    for i in k:
        clf = KNN(n_neighbors = i)
        clf.fit(train_x, train_y)
        train_predict = clf.predict(train_x)
        k1 = f1_score(train_predict, train_y)
        train_f1.append(k1)
        test_predict = clf.predict(test_x)
        k = f1_score(test_predict, test_y)
        test_f1.append(k)
    return train_f1, test_f1

# Sweep k = 1..49 to trace the under/overfitting curve.
k = range(1,50)
train_f1, test_f1 = f1score(k)

train_f1, test_f1

score = pd.DataFrame({'train score': train_f1, 'test_score':test_f1}, index = k)
score

#visualising
plt.plot(k, test_f1, color ='red', label ='test')
plt.plot(k, train_f1, color ='green', label ='train')
plt.xlabel('K Neighbors')
plt.ylabel('F1 score')
plt.title('f1 curve')
# NOTE(review): ylim(0,4,1) passes 1 into matplotlib's `emit` positional
# parameter, and F1 never exceeds 1, so (0, 4) wastes the plot area —
# confirm whether (0, 1) was intended.
plt.ylim(0,4,1)
plt.legend()

#split the data again with a different seed to show the curve's sensitivity
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, random_state=42, stratify=y)

k = range(1,50)
train_f1, test_f1 = f1score(k)

#visualising
plt.plot(k, test_f1, color ='red', label ='test')
plt.plot(k, train_f1, color ='green', label ='train')
plt.xlabel('K Neighbors')
plt.ylabel('F1 score')
plt.title('f1 curve')
#plt.ylim(0,4,1)
plt.legend()

''' here the value of k is decided by using both train and test data ,
instead of (testset) that we can use validation set
types:
1. Hold-out validation
as we directly divide the data into praprotions, there might be a case where
the validation set is biased to only one class (which mean validation set might
have data of only one class, these results in set have no idea about the other class)
in this we have different distributions
2. Stratified hold out
in this we have equal distributions

in the hold out scenario we need good amount of data to maintain, so we need to
train with lot data.
if the dataset is small? and we want to bulid the complex relations out of them?
'''

# # Bias Variance Tradeoff

''' if variance is high then bias is low
if bias is high then variance is low

error       high bias    high variance    optimally in btw
fit         underfit     overfit          bestfit
k range     21<k         k<11             12<k<21
complexity  low          high             optimum

Generalization error : defines the optimum model btw high bias and high varaince

High variance refers to overfitting whereas high bias refers to underfitting
and we do not want both of these scenarios.
So, the best model is said to have low bias and low variance.
'''
Under fitting and Over fitting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="DweYe9FcbMK_" # ##### Copyright 2019 The TensorFlow Authors. # # # + cellView="form" colab={} colab_type="code" id="AVV2e0XKbJeX" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="sUtoed20cRJJ" # # Load CSV with tf.data # + [markdown] colab_type="text" id="1ap_W4aQcgNT" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/load_data/text"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] 
colab_type="text" id="C-3Xbt0FfGfs" # This tutorial provides an example of how to load CSV data from a file into a `tf.data.Dataset`. # # The data used in this tutorial are taken from the Titanic passenger list. We'll try to predict the likelihood a passenger survived based on characteristics like age, gender, ticket class, and whether the person was traveling alone. # + [markdown] colab_type="text" id="fgZ9gjmPfSnK" # ## Setup # + colab={} colab_type="code" id="I4dwMQVQMQWD" # !pip install tensorflow==2.0.0-beta1 # + colab={} colab_type="code" id="baYFZMW_bJHh" from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import tensorflow as tf import tensorflow_datasets as tfds # + colab={} colab_type="code" id="Ncf5t6tgL5ZI" TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv" TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv" train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL) test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL) # + colab={} colab_type="code" id="4ONE94qulk6S" # Make numpy values easier to read. np.set_printoptions(precision=3, suppress=True) # + [markdown] colab_type="text" id="Wuqj601Qw0Ml" # ## Load data # # So we know what we're doing, lets look at the top of the CSV file we're working with. # + colab={} colab_type="code" id="54Dv7mCrf9Yw" # !head {train_file_path} # + [markdown] colab_type="text" id="YOYKQKmMj3D6" # As you can see, the columns in the CSV are labeled. We need the list later on, so let's read it out of the file. # + colab={} colab_type="code" id="v0sLG216MtwT" # CSV columns in the input file. with open(train_file_path, 'r') as f: names_row = f.readline() CSV_COLUMNS = names_row.rstrip('\n').split(',') print(CSV_COLUMNS) # + [markdown] colab_type="text" id="ZS-bt1LvWn2x" # The dataset constructor will pick these labels up automatically. 
# # If the file you are working with does not contain the column names in the first line, pass them in a list of strings to the `column_names` argument in the `make_csv_dataset` function. # # ```python # # CSV_COLUMNS = ['survived', 'sex', 'age', 'n_siblings_spouses', 'parch', 'fare', 'class', 'deck', 'embark_town', 'alone'] # # dataset = tf.data.experimental.make_csv_dataset( # ..., # column_names=CSV_COLUMNS, # ...) # # ``` # # + [markdown] colab_type="text" id="gZfhoX7bR9u4" # This example is going to use all the available columns. If you need to omit some columns from the dataset, create a list of just the columns you plan to use, and pass it into the (optional) `select_columns` argument of the constructor. # # # ```python # # drop_columns = ['fare', 'embark_town'] # columns_to_use = [col for col in CSV_COLUMNS if col not in drop_columns] # # dataset = tf.data.experimental.make_csv_dataset( # ..., # select_columns = columns_to_use, # ...) # # ``` # + [markdown] colab_type="text" id="67mfwr4v-mN_" # We also have to identify which column will serve as the labels for each example, and what those labels are. # + colab={} colab_type="code" id="iXROZm5f3V4E" LABELS = [0, 1] LABEL_COLUMN = 'survived' FEATURE_COLUMNS = [column for column in CSV_COLUMNS if column != LABEL_COLUMN] # + [markdown] colab_type="text" id="t4N-plO4tDXd" # Now that these constructor argument values are in place, read the CSV data from the file and create a dataset. # # (For the full documentation, see `tf.data.experimental.make_csv_dataset`) # # + colab={} colab_type="code" id="Co7UJ7gpNADC" def get_dataset(file_path): dataset = tf.data.experimental.make_csv_dataset( file_path, batch_size=12, # Artificially small to make examples easier to show. 
label_name=LABEL_COLUMN, na_value="?", num_epochs=1, ignore_errors=True) return dataset raw_train_data = get_dataset(train_file_path) raw_test_data = get_dataset(test_file_path) # + [markdown] colab_type="text" id="vHUQFKoQI6G7" # Each item in the dataset is a batch, represented as a tuple of (*many examples*, *many labels*). The data from the examples is organized in column-based tensors (rather than row-based tensors), each with as many elements as the batch size (12 in this case). # # It might help to see this yourself. # + colab={} colab_type="code" id="qWtFYtwXIeuj" examples, labels = next(iter(raw_train_data)) # Just the first batch. print("EXAMPLES: \n", examples, "\n") print("LABELS: \n", labels) # + [markdown] colab_type="text" id="9cryz31lxs3e" # ## Data preprocessing # + [markdown] colab_type="text" id="tSyrkSQwYHKi" # ### Categorical data # # Some of the columns in the CSV data are categorical columns. That is, the content should be one of a limited set of options. # # In the CSV, these options are represented as text. This text needs to be converted to numbers before the model can be trained. To facilitate that, we need to create a list of categorical columns, along with a list of the options available in each column. # + colab={} colab_type="code" id="mWDniduKMw-C" CATEGORIES = { 'sex': ['male', 'female'], 'class' : ['First', 'Second', 'Third'], 'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'], 'embark_town' : ['Cherbourg', 'Southhampton', 'Queenstown'], 'alone' : ['y', 'n'] } # + [markdown] colab_type="text" id="8Ii0YWsoKBVx" # Write a function that takes a tensor of categorical values, matches it to a list of value names, and then performs a one-hot encoding. # + colab={} colab_type="code" id="bP02_BflkDbv" def process_categorical_data(data, categories): """Returns a one-hot encoded tensor representing categorical values.""" # Remove leading ' '. data = tf.strings.regex_replace(data, '^ ', '') # Remove trailing '.'. 
data = tf.strings.regex_replace(data, r'\.$', '') # ONE HOT ENCODE # Reshape data from 1d (a list) to a 2d (a list of one-element lists) data = tf.reshape(data, [-1, 1]) # For each element, create a new list of boolean values the length of categories, # where the truth value is element == category label data = tf.equal(categories, data) # Cast booleans to floats. data = tf.cast(data, tf.float32) # The entire encoding can fit on one line: # data = tf.cast(tf.equal(categories, tf.reshape(data, [-1, 1])), tf.float32) return data # + [markdown] colab_type="text" id="To2qbBGGMO1D" # To help you visualize this, we'll take a single category-column tensor from the first batch, preprocess it, and show the before and after state. # + colab={} colab_type="code" id="Ds7MOLMkK2Gf" class_tensor = examples['class'] class_tensor # + colab={} colab_type="code" id="HdDUSgpoTKfA" class_categories = CATEGORIES['class'] class_categories # + colab={} colab_type="code" id="yHQeR47_ObpT" processed_class = process_categorical_data(class_tensor, class_categories) processed_class # + [markdown] colab_type="text" id="ACkc_cCaTuos" # Notice the relationship between the lengths of the two inputs and the shape of the output. # + colab={} colab_type="code" id="gvvXM8m0T00O" print("Size of batch: ", len(class_tensor.numpy())) print("Number of category labels: ", len(class_categories)) print("Shape of one-hot encoded tensor: ", processed_class.shape) # + [markdown] colab_type="text" id="9AsbaFmCeJtF" # ### Continuous data # + [markdown] colab_type="text" id="o2maE8d2ijsq" # Continuous data needs to be normalized, so that the values fall between 0 and 1. To do that, write a function that multiplies each value by 1 over twice the mean of the column values. # # The function should also reshape the data into a two dimensional tensor. 
# # + colab={} colab_type="code" id="IwGOy61lkQw-" def process_continuous_data(data, mean): # Normalize data data = tf.cast(data, tf.float32) * 1/(2*mean) return tf.reshape(data, [-1, 1]) # + [markdown] colab_type="text" id="0Yh8R7BujTAu" # To do this calculation, you need the column means. You would obviously need to compute these in real life, but for this example we'll just provide them. # + colab={} colab_type="code" id="iNE_mTJqegGQ" MEANS = { 'age' : 29.631308, 'n_siblings_spouses' : 0.545455, 'parch' : 0.379585, 'fare' : 34.385399 } # + [markdown] colab_type="text" id="raZtRlmaj-A5" # Again, to see what this function is actually doing, we'll take a single tensor of continuous data and show it before and after processing. # + colab={} colab_type="code" id="G-t_RSBrM2Vm" age_tensor = examples['age'] age_tensor # + colab={} colab_type="code" id="M9lMLaEsjq3K" process_continuous_data(age_tensor, MEANS['age']) # + [markdown] colab_type="text" id="kPWkC4_1l3IG" # ### Preprocess the data # + [markdown] colab_type="text" id="jIvyqVAXmsN4" # Now assemble these preprocessing tasks into a single function that can be mapped to each batch in the dataset. # # # + colab={} colab_type="code" id="rMxEHN0SNPkC" def preprocess(features, labels): # Process categorial features. for feature in CATEGORIES.keys(): features[feature] = process_categorical_data(features[feature], CATEGORIES[feature]) # Process continuous features. for feature in MEANS.keys(): features[feature] = process_continuous_data(features[feature], MEANS[feature]) # Assemble features into a single tensor. features = tf.concat([features[column] for column in FEATURE_COLUMNS], 1) return features, labels # + [markdown] colab_type="text" id="34K5ESbYnkg4" # Now apply that function with `tf.Dataset.map`, and shuffle the dataset to avoid overfitting. 
# + colab={} colab_type="code" id="7M5km0f_1pVp" train_data = raw_train_data.map(preprocess).shuffle(500) test_data = raw_test_data.map(preprocess) # + [markdown] colab_type="text" id="IQOWatzRr2aF" # And let's see what a single example looks like. # + colab={} colab_type="code" id="Gc1o9ZpCsGGM" examples, labels = next(iter(train_data)) examples, labels # + [markdown] colab_type="text" id="aJnOromrse57" # The examples are in a two dimensional arrays of 12 items each (the batch size). Each item represents a single row in the original CSV file. The labels are a 1d tensor of 12 values. # + [markdown] colab_type="text" id="DlF_omQqtnOP" # ## Build the model # + [markdown] colab_type="text" id="lQoFh16LxtT_" # This example uses the [Keras Functional API](https://www.tensorflow.org/beta/guide/keras/functional) wrapped in a `get_model` constructor to build up a simple model. # + colab={} colab_type="code" id="JDM3FIgHNCW3" def get_model(input_dim, hidden_units=[100]): """Create a Keras model with layers. Args: input_dim: (int) The shape of an item in a batch. labels_dim: (int) The shape of a label. hidden_units: [int] the layer sizes of the DNN (input layer first) learning_rate: (float) the learning rate for the optimizer. Returns: A Keras model. """ inputs = tf.keras.Input(shape=(input_dim,)) x = inputs for units in hidden_units: x = tf.keras.layers.Dense(units, activation='relu')(x) outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x) model = tf.keras.Model(inputs, outputs) return model # + [markdown] colab_type="text" id="ce9PRb_LzFpm" # The `get_model` constructor needs to know the input shape of your data (not including the batch size). 
# + colab={} colab_type="code" id="qX-DU34ZuKJX" input_shape, output_shape = train_data.output_shapes input_dimension = input_shape.dims[1] # [0] is the batch size # + [markdown] colab_type="text" id="hPdtI2ie0lEZ" # ## Train, evaluate, and predict # + [markdown] colab_type="text" id="8gvw1RE9zXkD" # Now the model can be instantiated and trained. # + colab={} colab_type="code" id="Q_nm28IzNDTO" model = get_model(input_dimension) model.compile( loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(train_data, epochs=20) # + [markdown] colab_type="text" id="QyDMgBurzqQo" # Once the model is trained, we can check its accuracy on the `test_data` set. # + colab={} colab_type="code" id="eB3R3ViVONOp" test_loss, test_accuracy = model.evaluate(test_data) print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy)) # + [markdown] colab_type="text" id="sTrn_pD90gdJ" # Use `tf.keras.Model.predict` to infer labels on a batch or a dataset of batches. # + colab={} colab_type="code" id="Qwcx74F3ojqe" predictions = model.predict(test_data) # Show some results for prediction, survived in zip(predictions[:10], list(test_data)[0][1][:10]): print("Predicted survival: {:.2%}".format(prediction[0]), " | Actual outcome: ", ("SURVIVED" if bool(survived) else "DIED")) # + colab={} colab_type="code" id="kMirhswgW_ln"
site/en/r2/tutorials/load_data/csv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Data Expo 2009 - Airline on-time performance # ## by (<NAME>) # # ## Preliminary Wrangling # # - The data consists of flight arrival and departure details for all commercial flights within the USA, from October 1987 to April 2008. # - This is a large dataset: there are nearly 120 million records in total, and takes up 1.6 gigabytes of space compressed and 12 gigabytes when uncompressed. # - The data comes originally from RITA where it is described in detail. # - the data in bzipped csv file. # - These files have derivable variables removed, are packaged in yearly chunks and have been more heavily compressed than the originals. # - in this project we will discuss flight delay for __2008__ data set # + [markdown] slideshow={"slide_type": "subslide"} # ### Individual years: # # 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 # # ### Our scope on 2008 dataset # + [markdown] slideshow={"slide_type": "notes"} # ## How to Run the project: # - Insrt raw data download in __"./data/raw"__ # - Run the script __"Communicate-Dtata-Finding\src\data\make_dataset.py"__ # - Now you can find output data from script __"Communicate-Dtata-Finding/data/interim/*.csv"__ # - Run the notebook in __"Communicate-Dtata-Finding\notebooks\exploration.ipynb"__ to find __Exploratory data__ # - Run the notebook in __"Communicate-Dtata-Finding\notebooks\explanatory.ipynb"__ to find __Explanatory data__ # + slideshow={"slide_type": "skip"} # import all packages and set plots to be embedded inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import time # display static image online # %matplotlib inline 
# display zoomable images inline # #%matplotlib notebook # suppress warnings from final output import warnings warnings.simplefilter("ignore") # + [markdown] slideshow={"slide_type": "notes"} # ### User define function and general configrations # + slideshow={"slide_type": "notes"} ''' # center plot figure from IPython.core.display import HTML HTML( """ <style> .output_png { display: table-cell; text-align: center; vertical-align: middle; } </style> """ ) ''' # + slideshow={"slide_type": "skip"} # reset seaborn settings sns.reset_orig() # set plotting color base_color = sns.color_palette()[0] # + slideshow={"slide_type": "skip"} def load_dataset(name='flights'): ''' Description: load dataset acoordding to year parameter name string flights, diverted, canceled return dataframe ''' t1 = time.time() df = pd.read_csv('../data/interim/{}.csv'.format(name)) t2 = time.time() print('Elapsed loading time :', t2-t1) return df # + slideshow={"slide_type": "skip"} def plot_bar_str(df, col, inc=1, base_color=sns.color_palette()[0] , title = None , fontsize =40, figsize = None, rotation = 0, log =None): ''' Description: custom bar plot parameter df : dataframe col: destent colunm inc: increment value for yaxis base_color : bar color title: chart title fontsize: font size figsize: chart size rotation: x-axis label rotation log : y-axis log scale return dataframe ''' month_frq = df[col].value_counts() if figsize: plt.figure(figsize=figsize) if not log: month_max_count = month_frq[0] month_max_prop = month_max_count/1000 tick_prop = np.arange(0, month_max_prop, inc) tick_names = ['{:0.0f}K'.format(v) for v in tick_prop] plt.yticks(tick_prop*1000, tick_names) ax = plt.gca() ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) plt.xticks(rotation=rotation) sns.countplot(data = flights, x=col, color = base_color, ax=ax); if log : plt.yscale('log') if title: plt.title( label = title, fontsize=fontsize) plt.grid() return ax # + 
slideshow={"slide_type": "skip"} def sort_time_fmt(df1, col): ''' Description: create new dataframe contain hour, frequency order by hour mintes from string format column dataframe parameter : df1 : dataframe col: target column return dataframe ''' df = df1[col].value_counts().to_frame() df['hours'] = pd.to_datetime(df.index, format='%I:%M %p').hour df['minutes'] = pd.to_datetime(df.index, format='%I:%M %p').minute df = df.reset_index() df.columns = [col, 'frequency' , 'hours' , 'minutes'] df = df.sort_values(['hours', 'minutes']) df = df.drop(columns=['minutes']) df = df.groupby('hours').sum().reset_index() df.columns = [col, 'frequency'] return df # + slideshow={"slide_type": "skip"} def plot_line_month(df, x, figsize=(12, 4), title = None , fontsize =40, rotation = 0 ): ''' Description: custom line plot parameter : df : dataframe x,y : target column axes figsize : chart frame size title , fontsize : title label and size return dataframe ''' # make it a datetime so that we can sort it: # use %b because the data use the abbriviation of month df["Month"] = pd.to_datetime(df.Month, format='%b', errors='coerce').dt.month df = df.sort_values(by="Month") order = df.index max_count = df[y].max() max_prop = max_count/1000 tick_prop = np.arange(0, max_prop, 100) tick_names = ['{:0.0f}K'.format(v) for v in tick_prop] plt.figure(figsize=figsize) ax = plt.gca() ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) plt.yticks(tick_prop*1000, tick_names) if title: plt.title( label=title, fontsize=fontsize) df.plot(x=x, y=y, ax =ax); plt.grid() # + slideshow={"slide_type": "skip"} def plot_line_time_fmt(df, x, y='frequency', figsize=(12, 4), title = None , fontsize =40, xtitle= None , xfontsize =18, ytitle= None , yfontsize =18, rotation = 0 ): ''' Description: custom line plot parameter : df : dataframe x,y : target column axes figsize : chart frame size title , fontsize : title label and size xtitle , xfontsize : title 
label and size ytitle , yfontsize : title label and size return dataframe ''' order = df.index max_count = df[y].max() max_prop = max_count/1000 tick_prop = np.arange(0, max_prop, 100) tick_names = ['{:0.0f}K'.format(v) for v in tick_prop] plt.figure(figsize=figsize) ax = plt.gca() ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) plt.yticks(tick_prop*1000, tick_names) if title: plt.title( label=title, fontsize=fontsize) df.plot(x=x, y=y, ax =ax); plt.grid() # + slideshow={"slide_type": "skip"} def plot_hist(col, inc, title = None , fontsize =40, xtitle= None , xfontsize =18, ytitle= None , yfontsize =18, figsize = None,rotation = 0, log =None , scale=1): ''' Description: custom hist plot parameter : col : col from dataframe inc : y increment value figsize : chart frame size title , fontsize : title label and size xtitle , xfontsize : title label and size ytitle , yfontsize : title label and size return dataframe ''' if scale == 1000: k='K' else: k='' inc = 1 max_prop = (1.25*flights['ActualElapsedTimePmile'].max())/scale tick_prop = np.arange(0, max_prop, inc) tick_names = ['{:0.0f}{}'.format(v,k) for v in tick_prop] if figsize: plt.figure(figsize=figsize) plt.yticks(tick_prop*scale, tick_names) ax = plt.gca() plt.xticks(rotation=rotation) if log: plt.yscale('log') plt.grid(alpha= 0.2) sns.distplot(flights['ActualElapsedTimePmile'], ax=ax); plt.xlim(right=1); # adjust the right leaving left unchanged plt.xlim(left=0); # adjust the left leaving right unchanged ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) if title: plt.title( label=title, fontsize=fontsize) if xtitle: plt.xlabel(label=title, fontsize=xfontsize) if ytitle: plt.ylabel(label=title, fontsize=yfontsize) plt.grid() # + slideshow={"slide_type": "skip"} ''' # relative frequency # return the highest frequency month_max_count = month_frq[0] month_max_prop = 
month_max_count/flights.shape[0] tick_prop = np.arange(0, month_max_prop, 0.02) tick_names = ['{:0.2f}'.format(v) for v in tick_prop] plt.yticks(tick_prop*flights.shape[0], tick_names) plt.grid() sns.countplot(data = flights, x='Month', color = base_color, order = month_frq.index); ''' # + [markdown] slideshow={"slide_type": "skip"} # #### Run "../src/data/make_dataset.py" to divide the data set into three part for easier handling nan values # the output of the script in: # - ../data/interim/flights.csv # - ../data/interim/diverted.csv # - ../data/interim/canceled.csv # + slideshow={"slide_type": "skip"} # now we will load flights dataset 2008 flights = load_dataset() # print df size print(flights.shape) # dispay first 10 rows flights.head() # + slideshow={"slide_type": "skip"} # get df data types and schema flights.info() # + slideshow={"slide_type": "skip"} flights.columns # + slideshow={"slide_type": "skip"} totals_flights = flights.shape[0] # + slideshow={"slide_type": "skip"} # find the number of duplicated rows flights.duplicated().sum() # + slideshow={"slide_type": "skip"} # drop duplicated rows flights = flights.drop_duplicates() # + slideshow={"slide_type": "skip"} # check number of duplicated rows flights.duplicated().sum() # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() flights.isnull().sum() # + slideshow={"slide_type": "skip"} # drop all rows with any NaN and Null values flights = flights.dropna() # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() flights.isnull().sum() # + slideshow={"slide_type": "skip"} # the size after deleting null flights.shape[0] - totals_flights # + slideshow={"slide_type": "skip"} # change schema flights['Year'] = flights['Year'].astype('str') flights['Month'] = flights['Month'].astype('str') flights['DayofMonth'] = flights['DayofMonth'].astype('str') flights['DayOfWeek'] = flights['DayOfWeek'].astype('str') flights['DepTime'] = 
flights['DepTime'].astype('str') flights['CRSDepTime'] = flights['CRSDepTime'].astype('str') flights['ArrTime'] = flights['ArrTime'].astype('str') flights['CRSArrTime'] = flights['CRSArrTime'].astype('str') flights['UniqueCarrier'] = flights['UniqueCarrier'].astype('str') flights['FlightNum'] = flights['FlightNum'].astype('str') flights['TailNum'] = flights['TailNum'].astype('str') flights['ActualElapsedTime'] = flights['ActualElapsedTime'].astype('int') flights['CRSElapsedTime'] = flights['CRSElapsedTime'].astype('int') flights['AirTime'] = flights['AirTime'].astype('int') flights['ArrDelay'] = flights['ArrDelay'].astype('int') flights['DepDelay'] = flights['DepDelay'].astype('int') flights['Origin'] = flights['Origin'].astype('str') flights['Dest'] = flights['Dest'].astype('str') flights['Distance'] = flights['Distance'].astype('int') flights['TaxiIn'] = flights['TaxiIn'].astype('int') flights['TaxiOut'] = flights['TaxiOut'].astype('int') flights['CarrierDelay'] = flights['CarrierDelay'].astype('int') flights['WeatherDelay'] = flights['WeatherDelay'].astype('int') flights['NASDelay'] = flights['NASDelay'].astype('int') flights['SecurityDelay'] = flights['SecurityDelay'].astype('int') flights['LateAircraftDelay'] = flights['LateAircraftDelay'].astype('int') # + slideshow={"slide_type": "skip"} # get df data types and schema flights.info() # + slideshow={"slide_type": "skip"} flights.describe().astype(int) # + [markdown] slideshow={"slide_type": "slide"} # ## 01. Flights without cancellation nor divertion dataset: # + [markdown] slideshow={"slide_type": "subslide"} # ### What is the structure of your dataset? # # > There are 6,851,832 flight observations with 26 features in 2008 without diverted/cancelled flights and missing or incorrectโ€‹ data. # # # ### What is/are the main feature(s) of interest in your dataset? # # > Delayed flights in terms of carriers, origin & time. 
# # # ### What features in the dataset do you think will help support your investigation into your feature(s) of interest? # # > ArrDelay, Month, DayOfWeek, DepTime, ArrTime, UniqueCarrier. # + slideshow={"slide_type": "skip"} # now we will load flights dataset 2008 cancelled = load_dataset('canceled') # print df size print(cancelled.shape) # dispay first 10 rows cancelled.head() # + slideshow={"slide_type": "skip"} # get df data types and schema cancelled.info() # + slideshow={"slide_type": "skip"} cancelled.columns # + slideshow={"slide_type": "skip"} totals_cancelled = cancelled.shape[0] # + slideshow={"slide_type": "skip"} # find the number of duplicated rows cancelled.duplicated().sum() # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() cancelled.isnull().sum() # + slideshow={"slide_type": "skip"} # drop all columns with any NaN and NaT values cancelled = cancelled.dropna(axis=1) # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() cancelled.isnull().sum() # + slideshow={"slide_type": "skip"} # the size after deleting null cancelled.shape[0] - totals_cancelled # + slideshow={"slide_type": "skip"} # change schema cancelled['Year'] = cancelled['Year'].astype('str') cancelled['Month'] = cancelled['Month'].astype('str') cancelled['DayofMonth'] = cancelled['DayofMonth'].astype('str') cancelled['DayOfWeek'] = cancelled['DayOfWeek'].astype('str') cancelled['CRSDepTime'] = cancelled['CRSDepTime'].astype('str') cancelled['CRSArrTime'] = cancelled['CRSArrTime'].astype('str') cancelled['UniqueCarrier'] = cancelled['UniqueCarrier'].astype('str') cancelled['FlightNum'] = cancelled['FlightNum'].astype('str') cancelled['Origin'] = cancelled['Origin'].astype('str') cancelled['Dest'] = cancelled['Dest'].astype('str') cancelled['Distance'] = cancelled['Distance'].astype('int') cancelled['CancellationCode'] = cancelled['CancellationCode'].astype('str') # + slideshow={"slide_type": "skip"} # get df 
data types and schema cancelled.info() # + slideshow={"slide_type": "skip"} cancelled.describe().astype(int) # + [markdown] slideshow={"slide_type": "slide"} # ## 02. Flights that have been Cancellation: # + [markdown] slideshow={"slide_type": "subslide"} # ### What is the structure of your dataset? # # > There are 137,434 flight observations with 12 features in 2008. # # # ### What is/are the main feature(s) of interest in your dataset? # # - what are the worstest airlines in terms of cancelled flighes? # - what are the most cases of flights cancelled? # # # ### What features in the dataset do you think will help support your investigation into your feature(s) of interest? # # > UniqueCarrier, CancellationCode # + slideshow={"slide_type": "skip"} # now we will load flights dataset 2008 diverted = load_dataset('diverted') # print df size print(diverted.shape) # dispay first 10 rows diverted.head() # + slideshow={"slide_type": "skip"} # get df data types and schema diverted.info() # + slideshow={"slide_type": "skip"} diverted.columns # + slideshow={"slide_type": "skip"} totals_diverted = diverted.shape[0] # + slideshow={"slide_type": "skip"} # find the number of duplicated rows diverted.duplicated().sum() # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() diverted.isnull().sum() # + slideshow={"slide_type": "skip"} # drop all columns with any NaN and NaT values diverted = diverted.dropna(axis=1) # + slideshow={"slide_type": "skip"} # check for null in each column # flights.isna().sum() diverted.isnull().sum() # + slideshow={"slide_type": "skip"} # the size after deleting null diverted.shape[0] - totals_diverted # + slideshow={"slide_type": "skip"} # change schema diverted['Year'] = diverted['Year'].astype('str') diverted['Month'] = diverted['Month'].astype('str') diverted['DayofMonth'] = diverted['DayofMonth'].astype('str') diverted['DayOfWeek'] = diverted['DayOfWeek'].astype('str') diverted['DepTime'] = 
diverted['DepTime'].astype('str') diverted['CRSDepTime'] = diverted['CRSDepTime'].astype('str') diverted['UniqueCarrier'] = diverted['UniqueCarrier'].astype('str') diverted['FlightNum'] = diverted['FlightNum'].astype('str') diverted['DepDelay'] = diverted['DepDelay'].astype('int') diverted['Origin'] = diverted['Origin'].astype('str') diverted['Dest'] = diverted['Dest'].astype('str') diverted['Distance'] = diverted['Distance'].astype('int') # + slideshow={"slide_type": "skip"} # get df data types and schema diverted.info() # + slideshow={"slide_type": "skip"} diverted.describe().astype(int) # + [markdown] slideshow={"slide_type": "slide"} # ## 03. Flights that have been diverted: # + [markdown] slideshow={"slide_type": "subslide"} # ### What is the structure of your dataset? # # > There are 17,265 flight observations with 12 features in 2008. # # # ### What is/are the main feature(s) of interest in your dataset? # # > what are the Origin & Dest that have the most diverted flighes? # # # ### What features in the dataset do you think will help support your investigation into your feature(s) of interest? # # > Origin & Dest. # + [markdown] slideshow={"slide_type": "slide"} # ## Expoloring Flights dataset: # + [markdown] slideshow={"slide_type": "subslide"} # ### 01. Flight dataset Year column: # range : 1987-2008 # + slideshow={"slide_type": "subslide"} flights.Year.value_counts() # + [markdown] slideshow={"slide_type": "subslide"} # ### 02. 
Flight dataset Month column: # range : 1-12 # + slideshow={"slide_type": "skip"} flights.Month.describe() # + slideshow={"slide_type": "skip"} months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] # substitute number with actual day of month name for i in flights.Month.unique(): if str(i).isnumeric(): flights.Month.replace(i,months[int(i)], inplace=True) # + slideshow={"slide_type": "skip"} flights.Month.unique() # + slideshow={"slide_type": "skip"} plot_bar_str(flights, 'Month', 100, base_color, figsize=(11, 5), title = 'The Number Of Flights Per Month', fontsize =14); plt.xlabel('Months In 2008',fontsize =14); plt.ylabel('Number Of Flights',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # ### Bar plot indicate the number of flights per month # + slideshow={"slide_type": "subslide"} plot_bar_str(flights, 'Month', 100, base_color, figsize=(11, 5), title = 'The Number Of Flights Per Month', fontsize =14, log =True) plt.xlabel('Months In 2008',fontsize =14); plt.ylabel('Number Of Flights',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # - less flights on winter November , December # + [markdown] slideshow={"slide_type": "subslide"} # ### 03. Flight dataset DayofMonth column: # range : 1-31 # + slideshow={"slide_type": "skip"} flights.DayofMonth.describe() # + [markdown] slideshow={"slide_type": "subslide"} # ### Bar plot indicate the number of flights per day of month # + slideshow={"slide_type": "subslide"} plot_bar_str(flights, 'DayofMonth', 50, base_color, figsize=(15, 5), title = 'The Number Of Flights Per Day of Month', fontsize =14); plt.xlabel('Days of Months In 2008',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); # + [markdown] slideshow={"slide_type": "subslide"} # - There are not big difference amonge monthes in the flights number during the days of months, and its natural to be less half flights during 31th. 
# + [markdown] slideshow={"slide_type": "subslide"} # ### 04. Flight dataset DayOfWeek column: # range : 1 (Monday) - 7 (Sunday) # + slideshow={"slide_type": "skip"} # substitute number with actual day of week name days = ['', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] for i in flights.DayOfWeek.unique(): if str(i).isnumeric(): flights.DayOfWeek.replace(i,days[int(i)], inplace=True) # - days_type = pd.CategoricalDtype(categories=days[1:], ordered=True) flights['DayOfWeek'] = flights['DayOfWeek'].astype(days_type) # + slideshow={"slide_type": "skip"} flights.DayOfWeek.describe() # + slideshow={"slide_type": "skip"} plot_bar_str(flights, 'DayOfWeek', 200, base_color, figsize=(11, 5), title = 'The Number Of Flights Per Day', fontsize =14); plt.xlabel('Days',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); # + [markdown] slideshow={"slide_type": "subslide"} # #### Bar plot indicate the number of flights per day of week # + slideshow={"slide_type": "subslide"} plot_bar_str(flights, 'DayOfWeek', 200, base_color, rotation =45, log=True, figsize=(9,5), title = 'The Number Of Flights Per Day', fontsize =14) plt.xlabel('Days',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); # + [markdown] slideshow={"slide_type": "subslide"} # > There are not big difference amonge flights number during working days 'Tuesday', 'Wednesday', 'Thursday', 'Friday' , and less flights during Sunday , and more less in Saturday. # + [markdown] slideshow={"slide_type": "subslide"} # ### 05. 
Flight dataset DepTime column: # range : 00:00 AM-11:59 PM # actual departure time (local, hh:mm AM/PM) # + slideshow={"slide_type": "subslide"} flights.DepTime.describe() # + slideshow={"slide_type": "skip"} # sort time in ascending order with its frequency df = sort_time_fmt(flights, 'DepTime') # + slideshow={"slide_type": "skip"} df[df.frequency == df.frequency.max()] # + [markdown] slideshow={"slide_type": "subslide"} # ### line plot indicate the number of actual departed flights over the day time # + slideshow={"slide_type": "subslide"} plot_line_time_fmt(df, 'DepTime', 'frequency', figsize=(12, 4)); plt.xlabel('Time',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of Flights vs time of day',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # > peak crowd start from 05:00 AM to 6:00 PM # + [markdown] slideshow={"slide_type": "subslide"} # ### 06. Flight dataset CRSDepTime column: # range : 00:00 AM-11:59 PM # scheduled departure time (local, hh:mm AM/PM) # + slideshow={"slide_type": "skip"} flights.CRSDepTime.describe() # + slideshow={"slide_type": "skip"} df = sort_time_fmt(flights, 'CRSDepTime') df.head() # + slideshow={"slide_type": "skip"} df[df.frequency == df.frequency.max()] # + [markdown] slideshow={"slide_type": "subslide"} # ### line plot indicate the number of scheduled departed flights over the day time # + slideshow={"slide_type": "subslide"} plot_line_time_fmt(df, 'CRSDepTime', 'frequency', figsize=(12, 4)) plt.xlabel('Time',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of scheduled departed Flights vs time of day',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # > peak crowd start from 05:00 AM to 6:00 PM # + [markdown] slideshow={"slide_type": "subslide"} # ### 07. 
Flight dataset ArrTime column: # range : 00:00 AM-11:59 PM # actual arrival time (local, hh:mm AM/PM) # + slideshow={"slide_type": "skip"} flights.ArrTime.describe() # + slideshow={"slide_type": "skip"} df = sort_time_fmt(flights, 'ArrTime') df.head() # + slideshow={"slide_type": "skip"} df[df.frequency == df.frequency.max()] # + [markdown] slideshow={"slide_type": "subslide"} # ### line plot indicate the number of actual arrival flights over the day time # + slideshow={"slide_type": "subslide"} plot_line_time_fmt(df, 'ArrTime', 'frequency', figsize=(12, 4)); plt.xlabel('Time',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of actual arrival Flights vs time of day',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # > peak crowd start from 06:00 AM to 6:00 PM # + [markdown] slideshow={"slide_type": "subslide"} # ### 08. Flight dataset CRSArrTime column: # range : 00:00 AM-11:59 PM # scheduled arrival time (local, hh:mm AM/PM) # + slideshow={"slide_type": "subslide"} flights.CRSArrTime.describe() # + slideshow={"slide_type": "skip"} df = sort_time_fmt(flights, 'CRSArrTime') df.head() # + slideshow={"slide_type": "skip"} df[df.frequency == df.frequency.max()] # + [markdown] slideshow={"slide_type": "subslide"} # ### line plot indicate the number of scheduled arrival flights over the day time # + slideshow={"slide_type": "subslide"} plot_line_time_fmt(df, 'CRSArrTime', 'frequency', figsize=(12, 4)) plt.xlabel('Time',fontsize =12); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of scheduled arrival Flights vs time of day',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # > peak crowd start from 10:00 AM to 08:00 PM # + [markdown] slideshow={"slide_type": "subslide"} # ### 09. 
Flight dataset UniqueCarrier column: # range : 1 - 20 string # unique carrier code # + slideshow={"slide_type": "skip"} flights.UniqueCarrier.describe() # + [markdown] slideshow={"slide_type": "subslide"} # #### Bar plot indicate the number of flights per carrier # + slideshow={"slide_type": "subslide"} plot_bar_str(flights, 'UniqueCarrier', 200, base_color, figsize=(15, 4), rotation =0, log=True); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of Flights per carrier',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # > apperantly WN carrier have the most share of flights about 1,186,911 flight. # + slideshow={"slide_type": "skip"} plot_bar_str(flights, 'UniqueCarrier', 200, base_color, figsize=(10, 4), rotation =0); plt.ylabel('Number Of Flights',fontsize =12); plt.title('Number Of Flights per carrier',fontsize =14); # + [markdown] slideshow={"slide_type": "subslide"} # ### 10. Flight dataset FlightNum column: # range : string # flight number # + slideshow={"slide_type": "subslide"} flights.FlightNum.describe() # + [markdown] slideshow={"slide_type": "subslide"} # ### 11. 
# Flight dataset TailNum column:
# range : string
# plane tail number :
# aircraft registration, unique aircraft identifier

# + slideshow={"slide_type": "subslide"}
flights.TailNum.describe()

# + [markdown] slideshow={"slide_type": "skip"}
# # keep the original, then remove outliers

# + slideshow={"slide_type": "skip"}
flights_orignal = flights.copy()

# + slideshow={"slide_type": "skip"}
#flights = flights_orignal

# + [markdown] slideshow={"slide_type": "skip"}
# Earlier (abandoned) 3-sigma approach, kept for reference:
# outliers = (((df.DepDelay - df.DepDelay.mean()).abs() > df.DepDelay.std()*3) |
#             ((df.ArrDelay - df.ArrDelay.mean()).abs() > df.ArrDelay.std()*3))

# + slideshow={"slide_type": "skip"}
def outlier(df, col):
    """Return *df* restricted to rows whose *col* value lies inside the
    Tukey 1.5*IQR fences.

    The bounds below are algebraically identical to the classic rule:
        2.5*Q1 - 1.5*Q3 == Q1 - 1.5*(Q3 - Q1)   (lower fence)
        2.5*Q3 - 1.5*Q1 == Q3 + 1.5*(Q3 - Q1)   (upper fence)
    so rows more than 1.5 interquartile ranges beyond either quartile
    are dropped. Does not mutate *df*; returns a filtered view/copy.
    """
    df_non_outlier = df[(df[col] >= (2.5*df[col].quantile(.25)-1.5*df[col].quantile(.75))) &
                        (df[col] <= (2.5*df[col].quantile(.75)-1.5*df[col].quantile(.25)))]
    return df_non_outlier

# + [markdown] slideshow={"slide_type": "subslide"}
# ### 12. Flight dataset ActualElapsedTime column:
# range : in minutes
# ActualElapsedTime

# + slideshow={"slide_type": "subslide"}
flights.ActualElapsedTime.describe().round(2)

# + slideshow={"slide_type": "skip"}
# remove outliers (NOTE: reassigns `flights` destructively)
flights = outlier(flights, 'ActualElapsedTime')

# + slideshow={"slide_type": "skip"}
flights.ActualElapsedTime.describe().round(2)

# + slideshow={"slide_type": "skip"}
# derived feature: elapsed minutes per mile flown
flights['ActualElapsedTimePmile'] = flights['ActualElapsedTime'].astype(float)/ flights['Distance'].astype(float)

# + slideshow={"slide_type": "skip"}
flights.ActualElapsedTimePmile.describe().round(3)

# + slideshow={"slide_type": "skip"}
bins = np.arange(0,flights['ActualElapsedTimePmile'].max()+0.1,0.1)
plt.hist(data = flights, x= 'ActualElapsedTimePmile', bins=bins);
plt.xlim(right=1);  # adjust the right leaving left unchanged
plt.xlim(left=0);  # adjust the left leaving right unchanged

# + slideshow={"slide_type": "skip"}
# same histogram, log-scaled y to expose the thin right tail
bins = np.arange(0,flights['ActualElapsedTimePmile'].max()+0.1,0.1)
plt.hist(data = flights, x= 'ActualElapsedTimePmile', bins=bins);
plt.xlim(right=4);  # adjust the right leaving left unchanged
plt.xlim(left=-0.5);  # adjust the left leaving right unchanged
plt.yscale('log')

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Histogram showing the distribution of actual elapsed time per mile flown

# + slideshow={"slide_type": "skip"}
plot_hist(flights['ActualElapsedTimePmile'], 1, scale=1)

# + slideshow={"slide_type": "skip"}
sns.boxplot(x=flights['ActualElapsedTimePmile']);
plt.xlim(right=0.4);  # adjust the right leaving left unchanged
plt.xlim(left=0);  # adjust the left leaving right unchanged

# + slideshow={"slide_type": "skip"}
flights.boxplot(column=['ActualElapsedTimePmile']);
plt.ylim(top=0.4);  # adjust the top leaving bottom unchanged
plt.ylim(bottom=0);  # adjust the bottom leaving top unchanged

# + [markdown] slideshow={"slide_type": "subslide"}
# ### 13. Flight dataset CRSElapsedTime column:
# range : in minutes
# CRSElapsedTime

# + slideshow={"slide_type": "skip"}
flights.CRSElapsedTime.describe().round(1)

# + slideshow={"slide_type": "skip"}
# inspect physically-impossible negative scheduled durations
flights[flights['CRSElapsedTime'] <0]

# + [markdown] slideshow={"slide_type": "skip"}
# #### remove incorrect data

# + slideshow={"slide_type": "skip"}
flights = flights[~(flights['CRSElapsedTime'] <0)]

# + slideshow={"slide_type": "skip"}
flights.CRSElapsedTime.describe().round(1)

# + slideshow={"slide_type": "skip"}
# remove outliers
flights = outlier(flights, 'CRSElapsedTime')

# + slideshow={"slide_type": "skip"}
flights.CRSElapsedTime.describe().round(1)

# + slideshow={"slide_type": "skip"}
# derived feature: scheduled minutes per mile flown
flights['CRSElapsedTimePmile'] = flights['CRSElapsedTime'].astype(float)/ flights['Distance'].astype(float)

# + slideshow={"slide_type": "skip"}
# NOTE(review): this describes ActualElapsedTimePmile; CRSElapsedTimePmile
# was presumably intended here — confirm with the author.
flights.ActualElapsedTimePmile.describe().round(3)

# + slideshow={"slide_type": "skip"}
bins = np.arange(0,flights['CRSElapsedTimePmile'].max()+0.1,0.1)
plt.hist(data = flights, x= 'CRSElapsedTimePmile', bins=bins);
plt.xlim(right=1);  # adjust the right leaving left unchanged
plt.xlim(left=0);  # adjust the left leaving right unchanged
#### Histogram showing the distribution of air time per mile flown
#### Histogram showing the distribution of arrival delay time
#### Histogram showing the distribution of departure delay time
Flight dataset Origin column: # range : string # origin IATA airport code # + slideshow={"slide_type": "skip"} flights.Origin.describe() # + [markdown] slideshow={"slide_type": "subslide"} # ### 18. Flight dataset Dest column: # range : string # destination IATA airport code # + slideshow={"slide_type": "skip"} flights.Dest.describe() # + [markdown] slideshow={"slide_type": "subslide"} # ### 19. Flight dataset Distance column: # range : in miles # Distance # + slideshow={"slide_type": "skip"} flights.Distance.describe().round(1) # + [markdown] slideshow={"slide_type": "subslide"} # ### 20. Flight dataset TaxiIn column: # range : in minutes # taxi in time # + slideshow={"slide_type": "skip"} flights.TaxiIn.describe().round(1) # + slideshow={"slide_type": "skip"} # remove outliers flights = outlier(flights, 'TaxiIn') # + slideshow={"slide_type": "skip"} flights.TaxiIn.describe().round(1) # + [markdown] slideshow={"slide_type": "subslide"} # ### 21. Flight dataset TaxiOut column: # range : in minutes # taxi out time # + slideshow={"slide_type": "subslide"} flights.TaxiOut.describe().round(1) # + slideshow={"slide_type": "skip"} # remove outliers flights = outlier(flights, 'TaxiOut') # + slideshow={"slide_type": "skip"} flights.TaxiOut.describe().round(1) # + [markdown] slideshow={"slide_type": "subslide"} # ### 25. Flight dataset CarrierDelay column: # range : in minutes # - Carrier delay is within the control of the air carrier. 
# - Examples of occurrences that may determine carrier delay are: # - aircraft cleaning, aircraft damage, awaiting the arrival of connecting passengers or crew, # - baggage, bird strike, cargo loading, catering, computer, outage-carrier equipment, # - crew legality (pilot or attendant rest), damage by hazardous goods, engineering inspection, fueling, # - handling disabled passengers, late crew, lavatory servicing, maintenance, oversales, # - potable water servicing, removal of unruly passenger, slow boarding or seating, # - stowing carry-on baggage, weight and balance delays. # + slideshow={"slide_type": "subslide"} flights.CarrierDelay.describe().round(3) # + [markdown] slideshow={"slide_type": "subslide"} # ### 26. Flight dataset WeatherDelay column: # range : in minutes # Weather delay is caused by extreme or hazardous weather conditions that are forecasted or # manifest themselves on point of departure, enroute, or on point of arrival. # # + slideshow={"slide_type": "subslide"} flights.WeatherDelay.describe().round(3) # + [markdown] slideshow={"slide_type": "subslide"} # ### 27. Flight dataset NASDelay column: # range : in minutes # Delay that is within the control of the National Airspace System (NAS) may include: # non-extreme weather conditions, airport operations, heavy traffic volume, air traffic control, etc. # + slideshow={"slide_type": "subslide"} flights.NASDelay.describe().round(3) # + [markdown] slideshow={"slide_type": "subslide"} # ### 28. Flight dataset SecurityDelay column: # range : in minutes # Security delay is caused by evacuation of a terminal or concourse, re-boarding of aircraft because of security breach, inoperative screening equipment and/or long lines in excess of 29 minutes at screening areas. # + slideshow={"slide_type": "subslide"} flights.SecurityDelay.describe().round(3) # + [markdown] slideshow={"slide_type": "subslide"} # ### 29. 
## Saving cleaned data after removing outliers and duplicates
'DayofMonth', 'DayOfWeek', 'DepTime', 'CRSDepTime', 'UniqueCarrier', 'FlightNum', 'DepDelay', 'Origin', 'Dest', 'Distance']] # + slideshow={"slide_type": "skip"} # saving data t1 = time.time() diverted.to_csv('../data/processed/{}.csv'.format('diverted'), index=False) t2 = time.time() print('Elapsed saving time :', t2-t1) # + slideshow={"slide_type": "skip"} # !jupyter nbconvert exploration.ipynb --to slides --post serve --no-input --no-prompt # + [markdown] slideshow={"slide_type": "skip"} # > At the end of your report, make sure that you export the notebook as an # html file from the `File > Download as... > HTML` menu. Make sure you keep # track of where the exported file goes, so you can put it in the same folder # as this notebook for project submission. Also, make sure you remove all of # the quote-formatted guide notes like this one before you finish your report!
notebooks/exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # More Tests # # **<NAME>** # # **Mar 2021** # # Basic tests for Mie scattering in a Jupyter notebook collected in one place. # # *If RigolWFM is not installed, uncomment the following cell (i.e., delete the #) and run (shift-enter)* # + # #!pip install --user miepython # + import numpy as np import unittest try: from miepython import * #testing private functions except ModuleNotFoundError: print('miepython not installed. To install, uncomment and run the cell above.') print('Once installation is successful, rerun this cell again.') # + class low_level(unittest.TestCase): def test_01_log_derivatives(self): x = 62 m = 1.28 - 1.37j nstop = 50 dn = miepython._D_calc(m,x,nstop) self.assertAlmostEqual(dn[10].real, 0.004087, delta=0.00001) self.assertAlmostEqual(dn[10].imag, 1.0002620, delta=0.00001) def test_02_an_bn(self): # Test values from <NAME>'s Mie Scattering in Mathematica # imaginary parts are negative because different sign convention m = 4.0/3.0 x = 50 a, b = miepython._mie_An_Bn(m,x) self.assertAlmostEqual( a[0].real, 0.5311058892948411929, delta=0.00000001) self.assertAlmostEqual(-a[0].imag,-0.4990314856310943073, delta=0.00000001) self.assertAlmostEqual( b[0].real, 0.7919244759352004773, delta=0.00001) self.assertAlmostEqual(-b[0].imag,-0.4059311522289938238, delta=0.00001) m = 1.5-1j x = 2 a, b = miepython._mie_An_Bn(m,x) self.assertAlmostEqual( a[0].real, 0.5465202033970914511, delta=0.00000001) self.assertAlmostEqual(-a[0].imag,-0.1523738572575972279, delta=0.00000001) self.assertAlmostEqual( b[0].real, 0.3897147278879423235, delta=0.00001) self.assertAlmostEqual(-b[0].imag, 0.2278960752564908264, delta=0.00001) m = 1.1-25j x = 2 a, b = miepython._mie_An_Bn(m,x) self.assertAlmostEqual(a[1].real, 0.324433578437, delta=0.0001) 
self.assertAlmostEqual(a[1].imag, 0.465627763266, delta=0.0001) self.assertAlmostEqual(b[1].real, 0.060464399088, delta=0.0001) self.assertAlmostEqual(b[1].imag,-0.236805417045, delta=0.0001) class non_absorbing(unittest.TestCase): def test_03_bh_dielectric(self): m = 1.55 lambda0 = 0.6328 radius = 0.525 x = 2*np.pi*radius/lambda0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 3.10543, delta=0.00001) self.assertAlmostEqual(qsca, 3.10543, delta=0.00001) self.assertAlmostEqual(qback,2.92534, delta=0.00001) self.assertAlmostEqual(g ,0.63314, delta=0.00001) def test_05_wiscombe_non_absorbing(self): # MIEV0 Test Case 5 m=complex(0.75, 0.0) x=0.099 s1 = 1.81756e-8 - 1.64810e-4 * 1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000007, delta=1e-6) self.assertAlmostEqual(g, 0.001448, delta=1e-6) self.assertAlmostEqual(qback, G, delta=1e-6) # MIEV0 Test Case 6 m=complex(0.75, 0.0) x=0.101 s1 = 2.04875E-08 -1.74965E-04 * 1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000008, delta=1e-6) self.assertAlmostEqual(g, 0.001507, delta=1e-6) self.assertAlmostEqual(qback, G, delta=1e-6) # MIEV0 Test Case 7 m=complex(0.75, 0.0) x=10.0 s1 = -1.07857E+00 -3.60881E-02 * 1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.232265, delta=1e-6) self.assertAlmostEqual(g, 0.896473, delta=1e-6) self.assertAlmostEqual(qback, G, delta=1e-6) # MIEV0 Test Case 8 m=complex(0.75, 0.0) x=1000.0 s1= 1.70578E+01 + 4.84251E+02 *1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 1.997908, delta=1e-6) self.assertAlmostEqual(g, 0.844944, delta=1e-6) self.assertAlmostEqual(qback, G, delta=1e-6) def test_05_old_wiscombe_non_absorbing(self): # OLD MIEV0 Test Case 1 m=complex(1.5, 0.0) x=10 s1 = 4.322E+00 + 4.868E+00 * 1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.8820, 
delta=1e-4) self.assertAlmostEqual(qback, G, delta=1e-4) # OLD MIEV0 Test Case 2 m=complex(1.5, 0.0) x=100 s1 = 4.077E+01 + 5.175E+01 * 1j G=abs(2*s1/x)**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.0944, delta=1e-4) self.assertAlmostEqual(qback, G, delta=1e-4) # OLD MIEV0 Test Case 3 m=complex(1.5, 0.0) x=1000 G= 4 * 2.576E+06 / x**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.0139, delta=1e-4) self.assertAlmostEqual(qback, G, delta=1e-3) # OLD MIEV0 Test Case 4 m=complex(1.5, 0.0) x=5000.0 G= 4 * 2.378E+08 / x**2 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.0086, delta=1e-4) self.assertAlmostEqual(qback, G, delta=3e-3) def test_04_non_dielectric(self): m = 1.55-0.1j lambda0 = 0.6328 radius = 0.525 x = 2*np.pi*radius/lambda0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 2.86165188243, delta=1e-7) self.assertAlmostEqual(qsca, 1.66424911991, delta=1e-7) self.assertAlmostEqual(qback,0.20599534080, delta=1e-7) self.assertAlmostEqual(g, 0.80128972639, delta=1e-7) class absorbing(unittest.TestCase): def test_06_wiscombe_water_absorbing(self): #MIEV0 Test Case 9 m=complex(1.33, -0.00001) x=1.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.093923, delta=1e-6) self.assertAlmostEqual(g, 0.184517, delta=1e-6) #MIEV0 Test Case 10 m=complex(1.33, -0.00001) x=100.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.096594, delta=1e-6) self.assertAlmostEqual(g, 0.868959, delta=1e-6) #MIEV0 Test Case 11 m=complex(1.33, -0.00001) x=10000.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(g, 0.907840, delta=1e-6) self.assertAlmostEqual(qsca, 1.723857, delta=1e-6) def test_07_wiscombe_absorbing(self): #MIEV0 Test Case 12 m = 1.5-1j x = 0.055 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000011, delta=1e-6) self.assertAlmostEqual(g, 0.000491, delta=1e-6) #MIEV0 Test Case 13 m = 
1.5-1j x = 0.056 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000012, delta=1e-6) self.assertAlmostEqual(g, 0.000509, delta=1e-6) #MIEV0 Test Case 14 m = 1.5-1j x = 1 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.6634538, delta=1e-6) self.assertAlmostEqual(g, 0.192136, delta=1e-6) #MIEV0 Test Case 15 m = 1.5-1j x = 100 x=100.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 1.283697, delta=1e-3) self.assertAlmostEqual(qext, 2.097502, delta=1e-2) self.assertAlmostEqual(g, 0.850252, delta=1e-3) #MIEV0 Test Case 16 m = 1.5-1j x = 10000 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 1.236575, delta=1e-6) self.assertAlmostEqual(qext, 2.004368, delta=1e-6) self.assertAlmostEqual(g, 0.846309, delta=1e-6) def test_08_wiscombe_more_absorbing(self): #MIEV0 Test Case 17 m = 10.0 - 10.0j x = 1.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.049405, delta=1e-6) self.assertAlmostEqual(g, -0.110664, delta=1e-6) #MIEV0 Test Case 18 m = 10.0 - 10.0j x = 100.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 1.836785, delta=1e-6) self.assertAlmostEqual(g, 0.556215, delta=1e-6) #MIEV0 Test Case 19 m = 10.0 - 10.0j x = 10000.0 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 1.795393, delta=1e-6) self.assertAlmostEqual(g, 0.548194, delta=1e-6) def test_09_single_nonmagnetic(self): m = 1.5-0.5j x = 2.5 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 2.562873497454734, delta=1e-7) self.assertAlmostEqual(qsca, 1.097071819088392, delta=1e-7) self.assertAlmostEqual(qback,0.123586468179818, delta=1e-7) self.assertAlmostEqual(g, 0.748905978948507, delta=1e-7) class perfectly_reflecting(unittest.TestCase): def test_11_wiscombe_perfectly_reflecting(self): # MIEV0 Test Case 0 m=0 x=0.001 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 3.3333E-12, delta=1e-13) # MIEV0 Test Case 1 
m=0 x=0.099 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000321, delta=1e-4) self.assertAlmostEqual(g, -0.397357, delta=1e-3) # MIEV0 Test Case 2 m=0 x=0.101 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 0.000348, delta=1e-6) self.assertAlmostEqual(g, -0.397262, delta=1e-6) # MIEV0 Test Case 3 m=0 x=100 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.008102, delta=1e-6) self.assertAlmostEqual(g, 0.500926, delta=1e-6) # MIEV0 Test Case 4 m=0 x=10000 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qsca, 2.000289, delta=1e-6) self.assertAlmostEqual(g, 0.500070, delta=1e-6) class small(unittest.TestCase): def test_10_small_spheres(self): # MIEV0 Test Case 5 m = 0.75 x = 0.099 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000007, delta=1e-6) self.assertAlmostEqual(g, 0.001448, delta=1e-6) # MIEV0 Test Case 6 m = 0.75 x=0.101 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000008, delta=1e-6) self.assertAlmostEqual(g, 0.001507, delta=1e-6) m = 1.5 -1j x = 0.055 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.101491, delta=1e-6) self.assertAlmostEqual(g, 0.000491, delta=1e-6) x=0.056 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.103347, delta=1e-6) self.assertAlmostEqual(g, 0.000509, delta=1e-6) m = 1e-10 - 1e10j x=0.099 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000321, delta=1e-6) self.assertAlmostEqual(g, -0.397357, delta=1e-4) x=0.101 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000348, delta=1e-6) self.assertAlmostEqual(g, -0.397262, delta=1e-6) m = 0 - 1e10j x=0.099 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000321, delta=1e-6) self.assertAlmostEqual(g, -0.397357, delta=1e-4) x=0.101 qext, qsca, qback, g = miepython.mie(m,x) self.assertAlmostEqual(qext, 0.000348, delta=1e-6) 
self.assertAlmostEqual(g, -0.397262, delta=1e-4) class angle_scattering(unittest.TestCase): def test_12_scatter_function(self): x=1.0 m=1.5-1.0j theta = np.arange(0,181,30) mu = np.cos(theta * np.pi/180) qext, qsca, qback, g = miepython.mie(m,x) S1, S2 = miepython.mie_S1_S2(m,x,mu) S1 *= np.sqrt(np.pi*x**2*qext) S2 *= np.sqrt(np.pi*x**2*qext) self.assertAlmostEqual(S1[0].real, 0.584080, delta=1e-6) self.assertAlmostEqual(S1[0].imag, 0.190515, delta=1e-6) self.assertAlmostEqual(S2[0].real, 0.584080, delta=1e-6) self.assertAlmostEqual(S2[0].imag, 0.190515, delta=1e-6) self.assertAlmostEqual(S1[1].real, 0.565702, delta=1e-6) self.assertAlmostEqual(S1[1].imag, 0.187200, delta=1e-6) self.assertAlmostEqual(S2[1].real, 0.500161, delta=1e-6) self.assertAlmostEqual(S2[1].imag, 0.145611, delta=1e-6) self.assertAlmostEqual(S1[2].real, 0.517525, delta=1e-6) self.assertAlmostEqual(S1[2].imag, 0.178443, delta=1e-6) self.assertAlmostEqual(S2[2].real, 0.287964, delta=1e-6) self.assertAlmostEqual(S2[2].imag, 0.041054, delta=1e-6) self.assertAlmostEqual(S1[3].real, 0.456340, delta=1e-6) self.assertAlmostEqual(S1[3].imag, 0.167167, delta=1e-6) self.assertAlmostEqual(S2[3].real, 0.0362285, delta=1e-6) self.assertAlmostEqual(S2[3].imag,-0.0618265, delta=1e-6) self.assertAlmostEqual(S1[4].real, 0.400212, delta=1e-6) self.assertAlmostEqual(S1[4].imag, 0.156643, delta=1e-6) self.assertAlmostEqual(S2[4].real,-0.174875, delta=1e-6) self.assertAlmostEqual(S2[4].imag,-0.122959, delta=1e-6) self.assertAlmostEqual(S1[5].real, 0.362157, delta=1e-6) self.assertAlmostEqual(S1[5].imag, 0.149391, delta=1e-6) self.assertAlmostEqual(S2[5].real,-0.305682, delta=1e-6) self.assertAlmostEqual(S2[5].imag,-0.143846, delta=1e-6) self.assertAlmostEqual(S1[6].real, 0.348844, delta=1e-6) self.assertAlmostEqual(S1[6].imag, 0.146829, delta=1e-6) self.assertAlmostEqual(S2[6].real,-0.348844, delta=1e-6) self.assertAlmostEqual(S2[6].imag,-0.146829, delta=1e-6) unittest.main(argv=[''], verbosity=2, exit=False) # -
docs/10_basic_tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import cffi import numpy as np from pynq import MMIO from pynq import Overlay from pynq import PL from pynq.drivers import DMA from time import sleep, time # Classifier Dimensions BATCH = 8192 FEAT = 256 CLASSES = 10 # Addresses ACCEL_CTRL = 0x43C00000 AXI_DMA_0 = 0x40400000 AXI_DMA_1 = 0x40410000 AXI_TIMER = 0x42800000 # C FFI ffi = cffi.FFI() # DMA Configs DMAConfig1 = { 'DeviceId' : 0, 'BaseAddr' : ffi.cast("uint32_t *",AXI_DMA_0), 'HasStsCntrlStrm' : 0, 'HasMm2S' : 1, 'HasMm2SDRE' : 1, 'Mm2SDataWidth' : 64, 'HasS2Mm' : 0, 'HasS2MmDRE' : 0, 'S2MmDataWidth' : 32, 'HasSg' : 0, 'Mm2sNumChannels' : 1, 'S2MmNumChannels' : 1, 'Mm2SBurstSize' : 256, 'S2MmBurstSize' : 16, 'MicroDmaMode' : 0, 'AddrWidth' : 32 } DMAConfig2 = { 'DeviceId' : 1, 'BaseAddr' : ffi.cast("uint32_t *",AXI_DMA_1), 'HasStsCntrlStrm' : 0, 'HasMm2S' : 0, 'HasMm2SDRE' : 0, 'Mm2SDataWidth' : 32, 'HasS2Mm' : 1, 'HasS2MmDRE' : 1, 'S2MmDataWidth' : 64, 'HasSg' : 0, 'Mm2sNumChannels' : 1, 'S2MmNumChannels' : 1, 'Mm2SBurstSize' : 16, 'S2MmBurstSize' : 256, 'MicroDmaMode' : 0, 'AddrWidth' : 32 } # Download the custom overlay ol = Overlay("classifier_fixed.bit") ol.download() # Initialize HLS IP mmult_ip = MMIO(ACCEL_CTRL,0x10000) # Start the accelerator ctrl=mmult_ip.read(0x00)&0x08 mmult_ip.write(0x00, (ctrl|0x81)) ctrl=mmult_ip.read(0x00) hex(ctrl) # Initialize DMA1 (mem to FPGA) dma1 = DMA(AXI_DMA_0, direction=0, attr_dict=DMAConfig1) dma1.create_buf((CLASSES*4+CLASSES*FEAT+BATCH*FEAT), cacheable=0) # Initialize DMA2 (FPGA to mem) dma2 = DMA(AXI_DMA_1, direction=1, attr_dict=DMAConfig2) dma2.create_buf(BATCH*CLASSES*4, cacheable=0) # Start DMA transfer from FPGA to memory dma2.transfer(BATCH*CLASSES*4, direction=1) # + deletable=true editable=true # 
Initialize offsets, weights and inputs o = np.load('model_offsets_fixed.npy').astype(np.int32) w = np.load('model_weights_fixed.npy').astype(np.int8) i = np.load('test_data.npy').astype(np.uint8)[0:BATCH] l = np.load('test_labels.npy').astype(np.int32)[0:BATCH] # + deletable=true editable=true # Move offset, weight and input data to DMA buffer ffi.memmove(dma1.get_buf(), ffi.cast("uint32_t *", o.ctypes.data), CLASSES*4) ffi.memmove(dma1.get_buf()+CLASSES, ffi.cast("uint32_t *", w.ctypes.data), CLASSES*FEAT) ffi.memmove(dma1.get_buf()+CLASSES+(CLASSES*FEAT)//4, ffi.cast("uint32_t *", i.ctypes.data), BATCH*FEAT) # Perform FPGA offloading start_t = time() dma1.transfer(CLASSES*4+CLASSES*FEAT+BATCH*FEAT, direction=0) dma2.wait() fpga_time = time()-start_t # Dump FPGA result to a numpy array c = np.frombuffer(ffi.buffer( dma2.get_buf(),BATCH*CLASSES*4), dtype=np.int32).reshape(BATCH,CLASSES) # + deletable=true editable=true # Prepare input and weight matrices for matrix multiplication on CPU ones = np.ones(BATCH).reshape((BATCH,1)) i_p = np.append(ones, i, axis=1) w_p = np.append(o.reshape(CLASSES,1), w, axis=1) # Compute CPU result start_t = time() c_ref = np.dot(i_p,w_p.T) cpu_time = time()-start_t # + deletable=true editable=true # Evaluate validation accuracy cpu_errors = 0 fpga_errors = 0 for idx in range(BATCH): fpga_label = np.argmax(c[idx]) cpu_label = np.argmax(c_ref[idx]) actual_label = np.argmax(l[idx]) if (fpga_label!=actual_label): fpga_errors += 1. if (cpu_label!=actual_label): cpu_errors += 1. # Report results print("FPGA accuracy: {0:.2f}% validation error".format(fpga_errors/BATCH*100)) print("CPU accuracy: {0:.2f}% validation error".format(cpu_errors/BATCH*100)) if (cpu_time < fpga_time): print("FPGA has a {0:.2f}x slowdown".format(fpga_time/cpu_time)) else: print("FPGA has a {0:.2f}x speedup".format(cpu_time/fpga_time)) # + deletable=true editable=true # Render a given numpy 2D array of pixel data. 
def show(image):
    """Render a 2D numpy array of pixel data as a greyscale image."""
    import matplotlib as mpl
    from matplotlib import pyplot

    figure = pyplot.figure()
    axes = figure.add_subplot(1,1,1)
    rendered = axes.imshow(image, cmap=mpl.cm.Greys)
    rendered.set_interpolation('nearest')
    axes.xaxis.set_ticks_position('top')
    axes.yaxis.set_ticks_position('left')
    pyplot.show()

# Inspect one of the hand digits classified by the FPGA
idx = 1
digit = i[idx].reshape(16,16)
show(digit)
print("Classified as {} by the FPGA".format(np.argmax(c[idx])))
zynq/jupyter/classifier_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ---
# layout: post
# title: "Tags on AO3: Visualization with Pie Chart"
# date: 2021-05-23
# category: visualization
# tags: Python Pandas Matplotlib pie-chart table
# ---

# + [markdown] tags=[]
# In this section, we're going to work on the other file from the AO3 data dump, the tags.
#
# * Table of Contents
# {:toc}
# -

# # Loading File

# +
# Load Python library
import pandas as pd

# Load file in 10k-row chunks to keep peak memory low, then reassemble.
path="/home/pi/Downloads/tags-20210226.csv"
chunker = pd.read_csv(path, chunksize=10000)
tags = pd.concat(chunker, ignore_index=True)
# -

# Preview file
tags

# # Exploring Data

# Find tag types
tags.type.unique()

# Find subcategories of Media type tag
tags[tags['type'] == 'Media'].name.unique()

# # Cleaning and Manipulating Data
#
# Visualize the Media tags according to its cached_count

# +
# Visualize the Media tags according to its cached_count
# Prepare data set
# Select columns
subset = tags[['type', 'name', 'cached_count']].copy()
subset

# +
# Select all names that type is media
media = subset[subset['type'] == 'Media'].drop('type',axis=1)
media
# -

# # Matplotlib Pie Chart

# +
# Import libraries
# Top line is Jupyter Notebook specific
# %matplotlib inline
import matplotlib.pyplot as plt

# +
# Visualization pie chart
# with matplotlib
explode = [0.1] * len(media.name)
fig, axes = plt.subplots()
ax = axes.pie(media.cached_count, labels=media.name, explode=explode, autopct='%1.1f%%')
# -

# # Modifying data set for a better graph
#
# Sort the data, find quantile, group lower values together

# +
# Sort the dataframe in descending order
# Use inplace=True to edit the existing dataframe
media.sort_values(by='cached_count',ascending=False, inplace=True, ignore_index=True)
media

# +
# Find 50th quantile values
qt_50 = media.cached_count.quantile(.50)
qt_50
# -

# Group values below 50th quantile
# Find what rows to drop
drop_rows = media[media['cached_count'] <= qt_50].copy()
drop_rows

# +
# Find the sum of drop_rows values
add_value = media[media['cached_count'] <= qt_50].cached_count.sum()
add_value

# +
# Edit media dataframe
# Add a new row Other with the grouped total as its cached_count.
# NOTE: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# build the extra row as a one-row DataFrame and concatenate instead.
other_row = pd.DataFrame([{'name': 'Other', 'cached_count': add_value}])
media2 = pd.concat([media[media['cached_count'] > qt_50], other_row], ignore_index=True)
media2

# +
# pie chart with new media2 dataframe
explode = [0.1] * len(media2.name)
fig,axes = plt.subplots()
ax = axes.pie(media2.cached_count, labels=media2.name, explode=explode, autopct='%1.1f%%')
# -

# # Matplotlib Table

# +
# Test pandas.plotting.table()
fig,ax = plt.subplots()
ax = pd.plotting.table(ax,data=drop_rows)
# -

# Edit drop_rows dataframe
# Add a new column of percentage (share of the total cached_count)
drop_rows['Percentage'] = (drop_rows.cached_count / media.cached_count.sum() * 100).round(2)
drop_rows

# Drop cached_count
# Change column name
drop_rows.drop('cached_count', axis=1, inplace=True)
drop_rows.columns = ['Other', 'Percentage']
drop_rows

# # Plotting Pie Chart and Table

# +
# Plot pie chart
# Plot table
explode = [0.1] * len(media2.name)
fig,axes = plt.subplots(figsize=(10, 8))
ax = axes.pie(media2.cached_count, labels=media2.name, explode=explode, autopct='%1.1f%%')
table = pd.plotting.table(axes,data=drop_rows, cellLoc='center')

# Aesthetics
table.scale(1,1.5) # add padding around cell text
ax = plt.title('AO3 Tags Breakdown By Media Type \n 2008-2021')
05-23-tags-pie-chart.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf2
#     language: python
#     name: tf2
# ---

import sklearn
from numpy import loadtxt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import datetime
from datetime import datetime, date, timedelta
import xgboost
import pandas as pd
from xgboost import XGBClassifier, XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
import sklearn.metrics as metrics
import numpy as np
#import catboost as cb
from sklearn.ensemble import AdaBoostRegressor
import lightgbm as lgb
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor

# +
# Load the hourly passenger dataset and derive helper columns.
data = pd.read_csv("M_GVB_weather_events_all_covidindex_holidays_binary2020.csv")
data["Date_time"] = pd.to_datetime(data["Date_time"])
# Hours 0-5 are flagged as metro-closed.
data['IS_METRO_OPEN'] = np.where((data['Hour'].isin([0,1,2,3,4,5])), 0, 1)


# +
def addlogs(res, cols):
    """Append a log-transformed copy of each column in `cols` as `<col>_log`.

    The 1.01 offset keeps log() defined for zero-valued counts. Returns a
    new DataFrame; the input frame itself is not modified.
    """
    m = res.shape[1]
    for c in cols:
        res = res.assign(newcol=pd.Series(np.log(1.01+res[c])).values)
        # assign() always names the column 'newcol'; rename it in place.
        res.columns.values[m] = c + '_log'
        m += 1
    return res

loglist = ["Checked_out_passengers_week_ago","Checked_in_passengers_week_ago", "BezoekersVerwacht",
           "Expected visitors to arrive", "Expected visitors to leave"]
data = addlogs(data, loglist)
# -

data.columns

data = data.dropna()

# Predictor columns used by split_to_X_Y below.
features = ["Checked_out_passengers_week_ago", "Checked_in_passengers_week_ago",
            'BezoekersVerwacht','StringencyIndex', 'Event starting', 'Event ending',
            'Temp(F)', 'Wind(MpH)', 'RainFall(in)', 'duration_rain_knmi',
            'avg_wind_speed_knmi', 'avg_temp_knmi', 'sum_rain_knmi', 'global_radiation',
            'C3_Cancel public events_0.0', 'C3_Cancel public events_1.0',
            'C3_Cancel public events_2.0', 'weekday_0.0', 'weekday_1.0', 'weekday_2.0',
            'weekday_3.0', 'weekday_4.0', 'weekday_5.0', 'weekday_6.0','Month', 'Hour',
            'HOLIDAY_BINARY', 'VACATION_BINARY',
            'Expected visitors to arrive', 'Expected visitors to leave','IS_METRO_OPEN',
            'Checked_in_passengers_BASELINE', 'Checked_out_passengers_BASELINE']


# +
def get_train_val_test_split(df):
    """
    Create train, validation, and test split for 1-week ahead models.
    This means that the last week of the data will be used as a test set,
    the second-last will be the validation set, and the rest will be the
    training set.

    Returns [train1, train2, validation, test] where train1 excludes both
    held-out weeks and train2 additionally includes the validation week.
    """
    set_testing_date = datetime(2021,11,21,23)
    #set_testing_date = df['datetime'].max()
    # 168 hourly stamps per held-out week (inclusive ranges).
    last_week = pd.date_range(set_testing_date - timedelta(hours=167), set_testing_date, freq = 'H')
    two_weeks_before = pd.date_range(set_testing_date - timedelta(hours=335), set_testing_date - timedelta(hours=168),freq = 'H')
    df['Date_time'] = pd.to_datetime(df['Date_time'])
    train1 = df[df['Date_time']<two_weeks_before.min()]
    train2 = df[df['Date_time']<=two_weeks_before.max()]
    validation = df[(df['Date_time']>=two_weeks_before.min()) & (df['Date_time']<=two_weeks_before.max())]
    test = df[(df['Date_time']>=last_week.min()) & (df['Date_time']<=last_week.max())]
    return [train1, train2, validation, test]

def split_to_X_Y(dataset):
    """Restrict to Station Zuid and split into targets (check-ins/outs) and features."""
    df = dataset[dataset["Station"] == "Station Zuid"]
    df_y = df[["Checked_in_passengers", "Checked_out_passengers"]]
    df_X = df[df.columns.intersection(features)]
    return [df_y, df_X]


# +
train1,train2, validation, test = get_train_val_test_split(data)

train_y, train_X = split_to_X_Y(train1)
validation_y, validation_X = split_to_X_Y(validation)
test_y, test_X = split_to_X_Y(test)

print("training1:", list(train1["Date_time"])[0],list(train1["Date_time"])[-1])
print("training2:", list(train2["Date_time"])[0],list(train2["Date_time"])[-1])
print("validating:",list(validation["Date_time"])[0],list(validation["Date_time"])[-1])
print("testing:",list(test["Date_time"])[0],list(test["Date_time"])[-1])
print(len(train1)/15)
print(len(validation)/15)
print(len(test)/15)


# -
def combine_predictions(y_pred_check_ins,y_pred_check_outs):
    """Pair per-target predictions into [check_in, check_out] rows.

    Indexes by the length of the first argument, so a shorter second
    argument still raises IndexError (matching the original behavior).
    """
    return [[y_pred_check_ins[k], y_pred_check_outs[k]]
            for k in range(len(y_pred_check_ins))]


# +
# 'reg:linear' is a deprecated alias of 'reg:squarederror' (identical loss);
# using the current name avoids the XGBoost deprecation warning.
model1 = MultiOutputRegressor(XGBRegressor(objective='reg:squarederror',n_estimators=100,learning_rate=0.2)).fit(train_X, train_y)
pred_y = model1.predict(test_X)

mae = metrics.mean_absolute_error(test_y, pred_y)
mse = metrics.mean_squared_error(test_y, pred_y)
rmse = np.sqrt(mse) # or mse**(0.5)
r2 = metrics.r2_score(test_y, pred_y)

print("Results of sklearn.metrics:")
print("MAE:",mae)
print("RMSE:", rmse)
print("R-Squared:", r2)
modelling/XGBoost/XGBoost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introducciรณn rรกpida a Python # ## Inteligencia Artificial 2020/2021 # ### <NAME> # Algunos de los ejemplos mostrados son del curso de Introducciรณn a la programaciรณn en Phyton de los cursos CFI https://cursosinformatica.ucm.es/ # # Tambiรฉn se ha utilizado material de introducciรณn a Phyton del profesor <NAME> de la universidad de Sevilla. # # Tenรฉis que ejecutar cada celda de cรณdigo y observar el resultado para comprender el funcionamiento de las instrucciones en la celda. Observar la sintaxis del lenguaje python para cada tipo de instrucciรณn. # ## Expresiones y variables # *Las variables en Python no hay que declararlas* # # Una variable es una **referencia** a una posiciรณn de memoria, en la que estรก almacenada el dato. (2+3)**4 (50-5*6)/4 # Solo se muestra el resultado de la รบltima instrucciรณn x=(2+3)**4 print (x) ancho = 20 alto = 5*9 area = ancho * alto area ancho, alto, area area*=2 area x = 1 y = 2 print('suma:', x + y) print('resta:', x - y) print('multiplicaciรณn:', x * y) print('division:', x / y) print('division entera:', x // y) print('potencia:', y ** 10) x=5 x += 2 x # El รบltimo valor de la celda se imprime por defecto # ## Cadenas de caracteres (Strings) # Son secuencias de caracteres entre comillas simples o dobles. c1= "Esto es una cadena " c2=' y esto tambien es una cadena' frase = c1 + c2 frase # operaciones con cadenas mayusculas = 'antonio'.upper() mayusculas = 'se queda con el ultimo'.upper() print(mayusculas) cadena_datos ="{} tiene {} aรฑos".format('juan', 21).upper() #es un string al que le podemos aplicar las operaciones print(cadena_datos) concatenar = 'hola' + ' ' + 'mundo!' print(concatenar) concatenar+=' y fin..' 
concatenar

frase = concatenar
frase*2 #Repetir el string
frase

# Se puede acceder a caracteres concretos de un string, mediante su índice de posición. En Python los índices _empiezan a contar en 0_ y pueden ser negativos (en ese caso, considera el string como circular y cuenta hacia atrás desde el último)
frase[7]

frase[-3]

frase[3]+frase[-4]

#Operador de slicing en Python: dada una secuencia l (como por ejemplo un string), la notación l[inicio:fin] indica la subsecuencia de l que comienza en la posición de índice inicio y acaba en la posición anterior a fin.
frase[2:6]

# Prueba de celda de Markdown.
#En la operación de _slicing_ se puede incluir un tercer argumento `l[inicio:fin:salto]`,
# indicando el salto a la hora de recorrer la lista.
#El salto puede ser negativo, indicando recorrido desde el final.
frase[6:2:-1]

# Por defecto inicio es 0 y fin es la última posicion de la secuencia y el salto es 1.
#Si el `salto` es negativo, los valores por defecto de `inicio` y `fin` se intercambian.
#Es decir, si no se da `inicio`, sería la última posición, y si no se da `fin` sería la primera
frase[0::-1]

frase[::-1]

# El tipo de dato string es una clase predefinida, y las cadenas de caracteres concretas son objetos de la clase string. Por tanto a un objeto de la clase string se le pueden aplicar los métodos que están predefinidos para la clase string.
cad="En un lugar de La Mancha"
#el método index de la clase String busca la posicion en la que está un substring en la cadena
# y da error si no lo encuentra
cad.index("Mancha")

cad.index("mancha")

# Otros métodos de la clase String son find, upper, count, join, split,..
# El resultado de join y split son listas que aparecen entre corchetes y separadas por , "Rojo y blanco y negro".split(" ") # __Print y format__: la funciรณn print permite escribir cadenas de caracteres por pantalla, y el mรฉtodo format de la clase string nos permite manejar cadenas de caracteres que contienen ciertos "huecos" (_templates_) que se rellenan con valores concretos (al estilo de C) uno ="la " dos =" es " print(uno,"Inteligencia",dos,"Artificial") # La cadena c tiene 3 huecos c="{} por {} es {}" x,y,u,z = 2,3,4,5 # format le pasa los parรกmetros para los 3 huecos print(c.format(x,y,x*y)) print(c.format(u,z,u*z)) # ## Pedir datos por consola nombre = input('Dime tu nombre:') #ย input siempre devuelve una cadena str() numero_entero = int(input('Dime un nรบmero entero:')) numero_real = float(input('Dime un nรบmero real:')) print('Nombre:', nombre, 'Entero:', numero_entero, 'Real:', numero_real) # ## Condicionales x = 25 if x % 2 == 0: print('Divisible por 2') elif x % 3 == 0: print('Divisible por 3') elif x % 5 == 0: print('Divisible por 5') else: print('Ni idea') # condiciones compuestas year = int(input('Introduce un aรฑo:')) if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0): print('bisiesto') else: print('no es bisiesto') 2==2 # Los valores logicos de verdad y falsedad en Python son `True` y `False`. Podemos comparar nรบmeros con el operador de comparaciรณn `==` (no confundir con el de asignaciรณn `=`), o comprobar si son distintos con `!=`, tambien se pueden usar los operadores lรณgicos usuales. 2!=4 and 2==1 # ## Bucles bรกsicos suma = 0 i = 1 while i <= 10: suma += i i += 1 suma # ## Tuplas # Las _tuplas_ en Python son secuencias de datos separadas por comas. Usualmente van entre parรฉntesis, aunque no es obligatorio (excepto para la tupla vacรญa). Ejemplos: # El operador coma crea tuplas. Se pueden escribir o no entre parรฉntesis. 
a = 1, 2, 3 print(a) b = (4, 5, 6) print(b) c=() c # Como las tuplas son secuencias, algunas de las operaciones de strings, tambiรฉn se pueden aplicar a las tuplas. En particular, el acceso a elementos a travรฉs de la posiciรณn, el operador de slicing, o la concatenaciรณn # Se pueden leer sus componentes de forma individual a[0] # + # Las tuplas son inmutables. #a[0] = 10 # ERROR! # - # La asignaciรณn entre tuplas asigna campo a campo. # (parece asignaciรณn mรบltiple pero es asignaciรณn entre tuplas) a, b, c = 10, 20, 30 print(a) print(b) print(c) 1, # No confundir con (1), que serรญa simplememente el nรบmero 1 (1) 1 a=("Uno","Dos","Tres","Cuatro") a[::1] #inversiรณn de una tupla a[::-1] # Las tuplas son tipos de datos __inmutables__. Esto significa que una vez creadas, no podemos cambiar su contenido. a[1]="otro" # ## Rangos range(1, 10) # Representa los nรบmeros del 1 al 9 >=1 y <=9 # los bucles for permiten recorrer estructuras de datos for x in range(1, 10): print(x) # del 1 al 9 de 3 en 3 for x in range(1, 10, 3): print(x) # del 10 al 2 hacia abajo for x in range(10, 1, -1): print(x) # si sรณlo tiene una argumento es el lรญmite superior for x in range(3): print(x) # ## Listas # Las listas, al igual que las tuplas, son __secuencias__ de datos. Pero son __mutables__ (es decir, podemos cambiar su contenido). # # Una lista se representa como una secuencia de datos entre corchetes y separadas por comas. tupla=("Uno","Dos","Tres","Cuatro") lista=["Uno","Dos","Tres","Cuatro"] lista[0]='Otro' lista a = [] # Lista vacรญa len(a) # Devuelve la longitud de una lista # Pueden contener cualquier tipo de elementos combinados. # Se aรฑaden elementos a la lista con append a.append('hola') a.append(123) a.append(True) a # Los mรฉtodos `append` y `extend`, respectivamente aรฑaden un elemento al final, y concatenan una lista al final. Nรณtese que son mรฉtodos __destructivos__, en el sentido de que modifican la lista a la que se aplican. 
a = ["Hola"] a.extend([6,"que tal"]) a # Podemos acceder y modificar sus elementos con acceso directo a[1] += 1 a # podemos borrar elementos a partir de su posiciรณn del a[0] a # podemos borrar elementos. Si el elemento no estรก o la lista estรก vacรญa da error. a.remove(7) a # El mรฉtodo pop elimina un elemento de una lista (especificando la posiciรณn, por defecto la รบltima), y devuelve dicho elemento como valor. # Si hacemos pop en una lista vacรญa da error a.pop() # Podemos crear listas con elementos a = [1, 2, 3, 4] a # En python podemos definir listas por 'comprensiรณn': a = [2*x for x in range(1,10)] a # el bucle for permite recorrer listas for elemento in a: print(elemento) a = list(range(1,11)) # el constructor list() crea listas a # Con el operador de slicing ':' que hemos visto para tuplas podemos seleccionar trozos de una lista y trabajar con ellos como si fueran listas. En realidad son vistas sobre la lista original, no se duplican los elementos en memoria. #El mรฉtodo `insert` inserta un elemento en una lista, en una posiciรณn dada a.insert(3,"x") a # Usa el valor por defecto de la posicion inicial a[:2] a[0:3] # selecciรณn de los 3 primeros elementos. Tambien se puede escribir a[:3] a[1:4] # selecciรณn de los elementos en las posiciones 1, 2 y 3 a[1:6:2] # saltando de 2 en 2 a[3:0:-1] # Elementos en posiciones 3, 2, 1 a[::-1] # ver la lista al revรฉs a[-3:] # ultimos 3 elementos de la lista bocadillo = ["pan", "jamon", "pan"] bocadillo = 2*bocadillo[:2] + ["huevo"] print(bocadillo) bocadillo + [bocadillo[-1]] "tomate" in bocadillo # + # Un error muy comรบn debido a que las variables en Phyton son referencias. l=[28,1,54,6] m=l # asignamos a m "el valor" de l m[2]=11 # cambiamos m # - # La secuencia anterior tiene un error muy comรบn debido a que las variables en Phyton son referencias.Piensa quรฉ pasarรญa si consultamos m y l m l # ยฟSe te ocurre alguna manera __correcta__ de obtener una versiรณn modificada de una lista sin cambiar la original? 
l=[28,1,22,6] m=l[:] # asignamos a m una COPIA del valor de l usando slicing # Estamos haciendo una copia idรฉntica usando los valores por defecto de inicio y fin. m[2]=11 # cambiamos m m l # ## Conjuntos # definir conjuntos dias_semana = { 'lunes', 'martes', 'miรฉrcoles', 'jueves', 'viernes', 'sรกbado'} # pertenencia 'miรฉrcoles' in dias_semana 'domingo'not in dias_semana # aรฑadir elementos dias_semana.add('domingo') 'domingo' in dias_semana # los elementos no se guardan ordenados dias_semana # no admite elementos repetidos print(len(dias_semana)) # nรบmero de elementos dias_semana.add('lunes') print(len(dias_semana)) # recorrido de conjuntos for elemento in dias_semana: print(elemento) # el constructor set() crea conjuntos # quitar elementos repetidos de una lista #set([1,2,3,1,2,3,1,2,3]) l=(1,3,2) a=list (l) 4 not in a # Tambiรฉn se pueden definir por comprensiรณn cuadrados = { x**2 for x in range(1,10)} cuadrados # ## Diccionarios # Un diccionario en Python es una estructura de datos que permite asignar valores a una serie de elementos (claves). En otros lenguajes de programaciรณn, esta estructura de datos se conoce como map o tabla hash. # Se representan como un conjunto de parejas clave:valor, separadas por comas y entre llaves. 
En el siguiente ejemplo, la clave "juan" tiene asignado el valor 4098, y la clave "ana"tiene asignado el valor 4139 dict_telefonos = {"juan": 4098, "ana": 4139} #acceso a clave dict_telefonos["ana"] #aรฑadir una nueva pareja clave/valor dict_telefonos["pedro"]=2321 #cambiar un valor dict_telefonos["ana"] = 4140 dict_telefonos #borrar una pareja clave/valor del dict_telefonos["ana"] print(dict_telefonos) # Contienen pares (clave, valor) diccionario = {'uno': 'one', 'dos': 'two', 'tres': 'three', 'cuatro': 'four'} diccionario # acceso a elementos por clave diccionario['uno'] # claves diccionario.keys() # pares (clave, valor) diccionario.items() # Son mutables diccionario['cinco'] = 'five' diccionario # elementos len(diccionario) # recorrido for clave in diccionario: print(clave, diccionario[clave]) for clave, valor in diccionario.items(): print(clave, valor) # ## Algo mรกs sobre Bucles # + # Buscar la posiciรณn ind de un elemento en una lista. Si no se encuentra, ind=-1 ind =0 busco = "premio" lst = ["nada","pierdo","premio","sigue"] while ind < len(lst) and lst[ind] != busco: ind += 1 if ind == len(lst): ind=-1 ind # - # El bucle `for`: # # * `for var in seq` # * `for var in range(n)` # # En el primer caso, `seq` es una secuencia (por ejemplo, una lista, tupla, o string), generรกndose tantas iteraciones como elementos tenga la secuencia, y en cada iteraciรณn, `var`va tomando los sucesivos valores de la secuencia. Por ejemplo: # + # Cรกlculo de media aritmรฉtica l, suma, n = [1,5,8,12,3,7], 0, 0 for e in l: suma += e n +=1 suma/n # + # Cรกlculo de nรบmeros primos entre 3 y 20 primos = [] for n in range(3, 20, 2): for x in range(2, n): if n % x == 0: print(n, "es", x, "*", n//x) break else: primos.append(n) primos # - # ### Otros patrones de iteraciรณn # # - `for k in dicc:` itera la variable `k` sobre las claves del diccionario `dicc`. # - `for (k,v) in dic.items():` itera el par `(k,v)` sobre los pares $(clave,valor)$ del diccionario `dicc`. 
# - `for (i,x) in enumerate(l):` itera el par `(i,x)`, donde `x` va tomando los distintos elementos de `l` e `i` la correspondiente posiciรณn de `x` en `l`. # - `for (u,v) in zip(l,m):` itera el par `(u,v)` sobre los correspondientes elementos de `l` y `m` que ocupan la misma posiciรณn. # - `for x in reversed(l):` itera `x` sobre la secuencia `l`, pero en orden inverso. # + preguntas = ["nombre", "apellido", "color favorito"] respuestas = ["Juan", "Pรฉrez", "rojo"] for p, r in zip(preguntas, respuestas): print("Mi {} es {}.".format(p, r)) # - # ## Funciones # Definiciรณn de una funciรณn def resta(x, y): return x - y resta(20, 5) # podemos pasar los algumentos por posiciรณn o por nombre resta(y=2, x=10) # si mezclamos, los primeros tienen que pasarse por posiciรณn resta(10, y=3) # + # Los parรกmetros de tipos bรกsicos se pasan por valor def incrementa_mal(x): x += 1 x = 5 incrementa_mal(x) x # + # Los parรกmetros de tipos compuestos se pasan por variable def add_dos(lista): lista.append(2) l = [] add_dos(l) l # + # Las funciones sรณlo pueden devolver un valor (return) pero puede ser una tupla (!!!) 
def suma_resta(x, y): return x + y, x - y a, b, = suma_resta(10, 3) a, b # + # Los parรกmetros pueden tener valores por defecto def incrementa(x, delta=1): return x + delta print(incrementa(5, 2)) print(incrementa(5)) # - # ## Mรณdulos # + # Para usar las funciones de un mรณdulo, primero tenemos que importarlo import math math.factorial(10) # + # Si el nombre del mรณdulo es muy largo y no queremos escribirlo todo el rato # podemos darle un 'apodo' import numpy as np np.add([1, 2, 3], [3, 4, 5]) # suma de vectores # + # Tambiรฉn podemos importar funciones concretas y ahorranos escribir el nombre # del mรณdulo cuando las usemos from math import factorial factorial(10) # + #ย Las bibliotecas normalmente exportan varios mรณdulos que pueden estar anidados import matplotlib.pyplot as plt x = np.linspace(-10, 10, 21) # 21 valores desde el -10 hasta el 10 equidistantes y = [e*e for e in x] plt.plot(x, y) # - # Y muchas cosas mรกs... # # https://docs.python.org # # Numpy # Una de las grandes carencias de Python es que no tiene arrays y las listas no son eficientes para trabajar con grandes cantidades de datos. Numpy es la biblioteca que permite la gestiรณn eficiente de arrays multidimensionales. # + import numpy as np # convertir una lista en un array l = [1, 2, 3, 4, 5, 6, 7, 8, 9] a = np.array(l) a # - # cambiar la forma de un array b = a.reshape(3,3) b # dimensiones b.shape # acceso a elementos print(b[0,0]) print(b[0,1]) print(b[2,2]) # vistas de subarrays b[0:2, 0:3] #ย arrays inicializados, te devuelve un nuevo array de 2 filas y 5 columnas inicializado a 0 np.zeros((2,5)) np.ones((2,5)) # devuelve una matriz diagonal 5x5 np.eye(5,5) #ย arange devuelve un array con los valores del 1 al 9 m = np.arange(1,10).reshape(3,3) m # Operaciones con todos los elementos m * 2 2 ** m # suma de todos los elementos m.sum() # suma por columnas m.sum(axis=0) # suma por filas m.sum(axis=1) # producto vectorial de dos matrices np.dot(m, m) # Y muchas cosas mรกs... 
# # http://www.numpy.org/
Practicas-IA/intro-python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # "Test2" # > "Awesome summary" # # - toc: true # - branch: master # - badges: true # - comments: true # - author: Ziltoid # - categories: [fastpages, jupyter] # Test of markdown text #hide_input print("hidden input") # Test image: # ![](my_icons/test.jpg) # Gif test: :+1:!
_notebooks/2021-07-15-test2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cedf868076a2" # ##### Copyright 2020 The Cirq Developers # + cellView="form" id="906e07f6e562" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="19599098c1f9" # # Introduction to Cirq # + [markdown] id="8bd3406cf99e" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://quantumai.google/cirq/tutorials/educators/intro"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/educators/intro.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/educators/intro.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/educators/intro.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> # </td> # </table> # + [markdown] id="8m9ye4AS6dE4" # 
[Cirq](https://github.com/quantumlib/cirq) is a framework for writing quantum algorithms for noisy intermediate scale quantum (NISQ) devices. Roughly speaking, NISQ devices are those with O(100) qubits that can enact O(1000) gates. Because the resources for NISQ devices are so constrained, we believe that a framework for writing programs on these devices needs to be aware of all of the architectural properties of the device on which the algorithm is written. This is in contrast to other frameworks where there is a clean separation between the abstract model being used and the details of the device. # # In this tutorial we will walk through the basics of writing quantum alogorithms in Cirq. Our final goal will be to write a variational ansatz for use in an optimization algorithm. # + [markdown] id="cc948e49cecb" # ## Installing Cirq # + [markdown] id="rPgPbry6-mF3" # To use Cirq one first needs to install Cirq. Installation instructions are available at [quantumai.google/cirq under "Installing Cirq"](https://quantumai.google/cirq/install). For the purpose of this tutorial, we run `pip install cirq` as shown in the following code cell to install the latest release of Cirq. # # > Different notebook execution systems exist, but for most part they have "run" button on a cell which you can click, or "shift + enter" is often the shortcut to run the cell. # + id="RlJBDvNgC00H" try: import cirq except ImportError: print("installing cirq...") # !pip install cirq --quiet print("installed cirq.") import cirq import matplotlib.pyplot as plt import numpy as np # + [markdown] id="GPjUqrK8DJTq" # Let's check that Cirq has been successfully installed by importing Cirq and printing out a diagram of Google's Bristlecone device shown below. 
# # ![Google's Bristecone chip](https://4.bp.blogspot.com/-b9akad6ismU/WpmyaJo-cYI/AAAAAAAACa8/mCqPBJxv5oUivy6Jq42FSOQYkeRlTmkiwCLcBGAs/s1600/image1.png) # + id="FTrmLyq4C2gf" """Test successful installation by printing out the Bristlecone device.""" print(cirq.google.Bristlecone) # + [markdown] id="09zRgohCMiBs" # This cell should run successfully, and the output should in fact be the grid of qubits for the Bristlecone device. If so, the install worked! # # > Be aware that Cirq is still alpha software, meaning **breaking changes can happen at any time**. If you don't want your project to suddenly go from working to not working when we a new version is released, you should depend on a *specific version* of Cirq and periodically bump that version to the latest one. For example, you can run `pip install cirq==x.y.z` to install version `x.y.z` of Cirq. # + [markdown] id="3340594dd8c1" # ## Qubits, Moments, Operations, and Circuits # + [markdown] id="8A7a3jcql1l5" # In Cirq, circuits are represented either by a `Circuit` object. Conceptually: # # - Q `Circuit` is a collection of `Moment`s. # - A `Moment` is a collection of `Operation`s that all act during the same abstract time slice. # - An `Operation` is a an effect that operates on a specific subset of Qubits. # - The most common type of `Operation` is a `Gate` applied to several qubits (a "`GateOperation`"). # # These ideas are illustrated by the following diagram. # + [markdown] id="03b7d753ecd5" # ![Circuits, Moments, and Operations.](/cirq/images/CircuitMomentOperation.png) # + [markdown] id="9768d38c9151" # ### Create a `Circuit` # + [markdown] id="VFwmWPf7D057" # A typical way to create a `Circuit` is shown below. # + id="pE88WsFeDGfs" """Creating a circuit.""" # Define three qubits. a = cirq.NamedQubit("a") b = cirq.NamedQubit("b") c = cirq.NamedQubit("c") # Define a list of operations. ops = [cirq.H(a), cirq.H(b), cirq.CNOT(b, c), cirq.H(b)] # Create a circuit from the list of operations. 
circuit = cirq.Circuit(ops) print("Circuit:\n") print(circuit) # + [markdown] id="-06jQwEdI4DJ" # We can unpack this a bit and see all of the components for the circuit. # # The first thing we do is pick some qubits to use. There are many different types of qubits in Cirq, and you can define your own by inheriting from the `cirq.Qid` class. There's nothing inherently special or magical about these quantum id types such as `cirq.NamedQubit`. They simply identify what you wish to operate on, which is relevant when you are targeting a specific device. For example, if we were creating a circuit for the Bristlecone device and wanted to refer to the qubit in the left-most position, we would use `cirq.GridQubit(5, 0)`. (See the first diagram of the Bristlecone device we printed out.) For simplicity, in the previous cell we defined `cirq.NamedQubit`s which are simply qubits that can be identified by a name. # # Next, we encounter the object `cirq.H` which is a Hadamard gate with unitary # # $$ # H = {1 \over \sqrt{2}} \left[ \begin{array}[cc] & 1 & 1 \\ 1 & -1 \end{array}\right] . # $$ # # In Cirq, `cirq.H` is an instance of the `cirq.HGate` class, which itself is a subclass of `Gate` (along with other classes). We can use Cirq to see the unitary matrix of `Gate` objects as follows. # + id="YKfg575v1DQB" """Get the unitary of a gate, here the Hadamard gate.""" cirq.unitary(cirq.H) # + [markdown] id="hJMAciW21KEg" # We see that this agrees with the unitary for the Hadamard gate above. # # `Gate` objects have the ability to applied "on" one or more qubits. There are two ways to do this for gates, either using the `on` method or by directly calling the gate on the qubits as if the gate were a function and the qubits were arguments. For example to apply the `H` onto qubit `a` we can say `cirq.H.on(a)` or `cirq.H(a)`. # # The result of those expressions is a `GateOperation` object, which is a type of `Operation`. 
# # > **Note**: In Cirq, there is a strong distinction between `Operation`s and `Gate`s. An `Operation` is associated with specific qubits and can be put in `Circuit`s. A `Gate` has unspecified qubits, and will produce an operation when acting on qubits. # # Once you have a collection of operations, you can construct a `Circuit` by passing the operations into the constructor for a `Circuit`: # # ``` # ops = [list of operations] # circuit = cirq.Circuit(ops) # ``` # # The last thing we did in the example code was use the (surprisingly useful) ability to print the circuit as a text diagram. # # The diagram is visually helpful, but it doesn't really get into the internal details of how the `Circuit` is represented. As mentioned, a `Circuit` is made up of a sequence of `Moment` objects, and each `Moment` object is a list of non-overlapping `Operation`s. To see this internal structure, we can iterate over the `Moment`s in the `Circuit` and print them out. # + id="hH-y4JiEMv25" """Print out the moments in a circuit.""" print("Circuit:\n") print(circuit) print("\nMoments in the circuit:\n") for i, moment in enumerate(circuit): print('Moment {}: {}'.format(i, moment)) # + [markdown] id="pm5iC7MNQY6-" # We see that this circuit consists of three moments. For even more on the underlying structure of a circuit, we can print the circuit's `repr`. This returns a more detailed (and usually less readable) expression. # + id="2Y6zG_peQG1y" """Print the repr of a circuit.""" print(repr(circuit)) # + [markdown] id="zyVbU8yfW_qi" # Although it is less readable, the usefulness of printing the `repr` is that it includes *all* the gory details which can be useful when debugging. The `repr` is also a valid python expression that evaluates to the circuit. 
# For example, if we notice that a circuit generated in some complicated way triggers a bug in a simulator, copy-pasting the generated circuit's `repr` into a test, and then working from there, is a simple way to decouple the reproduction of the bug from the circuit generation code.

# + [markdown] id="0bb8611c3865"
# ### More ways to create `Circuit`s

# + [markdown] id="uaDb6B_jPgrb"
# Above we created a `Circuit` by passing in a list of operations to its constructor. In Cirq, there are many ways to construct and modify circuits, and each of these is useful in different contexts. Here are a few examples:
#
#
# 1. `Circuit(...)`: This is the simplest way to make a circuit. Give this method some operations, and out pops a circuit.
# 2. `append`: `Circuit`s are mutable. You can start with an empty `circ = cirq.Circuit()` and simply `circ.append(operations)` to add on more and more operations.
# 3. `insert`: Instead of appending, you can insert before a particular moment location (labeled by an integer index).
#
# One interesting, and extremely convenient, fact about `Circuit(...)`, `append`, and `insert` is that they "auto flatten" whatever you give them.
# You *can* give them a list of operations, but you can also give them
#
# - a list *of lists* of operations,
# - a generator function that sometimes yields tuples of operations and other times yields individual operations,
# - or just a single operation (without a list around it).
#
# If it can be recursively iterated into individual operations, these three methods will take it.
#
# > The above idea uses a concept we call an `OP_TREE` in Cirq. An `OP_TREE` is not a class, but a contract. The basic idea is that if the input can be iteratively flattened into a list of operations, then the input is an `OP_TREE`.
#
# The main place where auto-flattening is useful is when you are building a circuit's operations using generators.
#
# > Recall that, in Python, functions that have a `yield` statement are *generators*. Generators are functions that act as *iterators*.
#
# In this context, auto-flattening means that generators producing operations for a circuit can simply `yield` sub-generators (instead of iterating over them and yielding their items). We show an example of this below.

# + id="QFoV-eOE1tGN"
"""Creating a circuit from generator functions."""
def xor_swap(a, b):
    """Swaps two qubits with three CNOTs."""
    yield cirq.CNOT(a, b)
    yield cirq.CNOT(b, a)
    yield cirq.CNOT(a, b)


def left_rotate(qubits):
    """Rotates qubits to the left."""
    # Swap each adjacent pair in turn; the first qubit's state bubbles to the end.
    for i in range(len(qubits) - 1):
        a, b = qubits[i: i + 2]
        yield xor_swap(a, b)


# Get five qubits on a line.
line = cirq.LineQubit.range(5)

# Create a circuit which rotates the qubits to the left.
print(cirq.Circuit(left_rotate(line)))

# + [markdown] id="ae159315c56d"
# One can see how this method of creating circuits is quite powerful.
#
# > Note that `cirq.SWAP` is a pre-defined gate in Cirq. We used three `cirq.CNOT`s instead of `cirq.SWAP` in the above example to demonstrate auto-flattening with generators.

# + [markdown] id="60d8516a19b2"
# ### Insert strategies

# + [markdown] id="p9LUxAU41wWs"
# You may have noticed that there is a hole in what we've explained so far. We have been passing a one-dimensional sequence of operations, but the output is a two-dimensional circuit (a list-of-lists-of-operations). There is a degree of freedom that hasn't been accounted for. Specifically, how does Cirq choose the moment that each operation will be placed within?
#
# The answer is the concept of a `cirq.InsertStrategy`. An `InsertStrategy` defines how `Operation`s are placed in a `Circuit` when requested to be inserted at a given location. Here a `location` is identified by the index of the `Moment` in the `Circuit` that operations should be placed before.
#
# > *Note*: In the case of `Circuit.append` this means inserting at the index `len(circuit)` which is one more than the largest moment index and so represents the end of the circuit.
# # There are currently four insertion strategies in Cirq: # # 1. `InsertStrategy.EARLIEST` (currently the default), # 2. `InsertStrategy.NEW`, # 3. `InsertStrategy.INLINE`, # 4. `InsertStrategy.NEW_THEN_INLINE`. # # The strategy `InsertStrategy.EARLIEST` is defined as follows: # # > `InsertStrategy.EARLIEST`: Scans backward from the insert # > location until a moment with operations touching qubits affected by the # > operation to insert is found. The operation is added into the moment just # > after that location. # # For example, if we first create an `Operation` in a single moment, # and then use `InsertStrategy.EARLIEST` the `Operation` can slide back to this # first `Moment` if there is space. # + id="wNek1WjpX4MR" """Appending operations with InsertStrategy.EARLIEST.""" # Create an empty circuit. circuit = cirq.Circuit() # Append an operation. # Note: InsertStrategy.EARLIEST is used by default if not otherwise specified. circuit.append([cirq.CZ(a, b)]) # Append more operations. # Note: InsertStrategy.EARLIEST is used by default if not otherwise specified. circuit.append([cirq.H(a), cirq.H(b), cirq.H(c)]) # Display the circuit. print("Circuit:\n") print(circuit) # + [markdown] id="4d93a69cfcb8" # After creating the first moment with a `CZ` gate, the second # append uses the `InsertStrategy.EARLIEST` strategy. The # `H` on ``a`` and ``b`` cannot slide back, while the `H` on ``c`` can and so ends up in the first `Moment`. # + [markdown] id="TcHeZM6qXvbS" # While `InsertStrategy.EARLIEST` is the default strategy, the second most important strategy is `InsertStrategy.NEW_THEN_INLINE`, defined as follows: # # > `InsertStrategy.NEW_THEN_INLINE`: For the first operation, add it to a new # > `Moment` the insertion point. Attempts to add the operation after the first # > operation to insert into the moment just before the desired insert location. 
# > But, if there's already an existing operation affecting any of the qubits # > touched by the operation to insert, a new moment is created instead and this # > `Moment` is the one that is subsequently used for insertions. # # To see an example of this strategy, we create a circuit with the same operations but inserting them with a different strategy. # + id="qWVDhLxFYuRp" """Appending operations with InsertStrategy.NEW_THEN_INLINE.""" # Create an empty circuit. circuit = cirq.Circuit() # Append an operation. circuit.append([cirq.CZ(a, b)], strategy=cirq.InsertStrategy.NEW_THEN_INLINE) # Append more operations. circuit.append([cirq.H(a), cirq.H(b), cirq.H(c)], strategy=cirq.InsertStrategy.NEW_THEN_INLINE) # Display the circuit. print("Circuit:\n") print(circuit) # + [markdown] id="69a53a1f5de2" # In contrast to the previous codeblock using `InsertStrategy.EARLIEST`, we see that the three `cirq.H` gates appended after the `cirq.CZ` gate appear in the same moment when we use `InsertStrategy.NEW_THEN_INLINE`. # + [markdown] id="y9conKPAPn26" # ### Exercise: Create a circuit # # Now that you've learned about `InsertStrategy`s, here is an exercise to validate your understanding. Create, **using the least number of appends**, the following circuit: # # # # ``` # a: โ”€โ”€โ”€@โ”€โ”€โ”€Hโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€Hโ”€โ”€โ”€Hโ”€โ”€โ”€ # โ”‚ # b: โ”€โ”€โ”€@โ”€โ”€โ”€โ”€โ”€โ”€โ”€Hโ”€โ”€โ”€@โ”€โ”€โ”€Hโ”€โ”€โ”€โ”€โ”€โ”€โ”€ # โ”‚ # c: โ”€โ”€โ”€Hโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€@โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ # ``` # # Here imagine that you want exactly the moments indicated by the spacing of the circuit so that there are six moments in this circuit. # + [markdown] id="KnA4uBkwEw5-" # #### Solution # + cellView="both" id="jP4VkPeHcjJT" #@title # Define three qubits. a = cirq.NamedQubit('a') b = cirq.NamedQubit('b') c = cirq.NamedQubit('c') # Get an empty circuit. circuit = cirq.Circuit() # Append these gates using cirq.InsertStrategy.EARLIEST (the default strategy). 
circuit.append([cirq.CZ(a, b), cirq.H(c), cirq.H(a)]) # Append these gates using cirq.InsertStrategy.NEW_THEN_INLINE. circuit.append( [cirq.H(b), cirq.CZ(b, c), cirq.H(b), cirq.H(a), cirq.H(a)], strategy=cirq.InsertStrategy.NEW_THEN_INLINE ) # Display the circuit. print("Circuit:\n") print(circuit) # + [markdown] id="X15yPl_KQ20Z" # ## Simulations of a Circuit # # Now that we know how to construct `Circuit`s in Cirq, let's see how to simulate them. First we create a simple circuit to simulate in the following cell. # + id="V6tZk3qGqBoH" """Get a circuit to simulate.""" def basic_circuit(measure=True): """Returns a simple circuit with some one- and two-qubit gates, as well as (optionally) measurements. """ # Gates we will use in the circuit. sqrt_x = cirq.X**0.5 cz = cirq.CZ # Yield the operations. yield sqrt_x(a), sqrt_x(b) yield cz(a, b) yield sqrt_x(a), sqrt_x(b) if measure: yield cirq.measure(a,b) # Create a circuit including measurements. circuit = cirq.Circuit(basic_circuit()) print(circuit) # + [markdown] id="WpywVOeDqi4Q" # The main simulator in Cirq is the `cirq.Simulator`. The general pattern of simulation is to instantiate this simulator, then pass in a circuit to either the `run` or `simulate` methods (more on this below). # + id="KmGuMjvGw_Ef" """Example of simulating a circuit in Cirq.""" # Get a simulator. simulator = cirq.Simulator() # Pass the circuit to the simulator.run method. result = simulator.run(circuit, repetitions=1) print("Measurement results:") print(result) # + [markdown] id="aHugx9T0z047" # Running this multiple times should result in different measurement results, since the circuit produces a superposition over all computational basis states. # + [markdown] id="416e9c012263" # Above we used the `run` method of the `simulator`. In Cirq, `run` methods mimic the actual hardware in that they don't give one access to unphysical objects like the wavefunction. The `repetitions` argument is how many times to sample from the circuit. 
# # If one wants to get the wavefunction, the `simulate` methods can be used as shown below. # + id="Apj7WiFZ0WFm" """Simulating a circuit with the `simulate` method.""" # Get a circuit without measurements. circuit = cirq.Circuit(basic_circuit(measure=False)) # Simulate the circuit. result = simulator.simulate(circuit, qubit_order=[a, b]) # Print the final state vector (wavefunction). print("Wavefunction:") print(np.around(result.final_state_vector, 3)) # Print the wavefunction in Dirac notation. print("\nDirac notation:") print(result.dirac_notation()) # + [markdown] id="t18-sIJc0cvf" # Notice that we passed a `qubit_order` into the `simulate` method. This order helps define the order of the kronecker (tensor) product used in the resulting `final_state_vector`. # # > *Note*: The `qubit_order` argument is optional. When it is omitted, qubits are sorted ascending according to the ordering methods defined by their Python class (for example `cirq.NamedQubit` sorts lexicographically by name). # If there are multiple types of qubits in one circuit, the name of the type is used as a tie breaker. # # The simplest `qubit_order` value you can provide is a list of the qubits in the desired order. Any qubits from the circuit that are not in the list will be ordered using the default `__str__` ordering, but come after qubits that are in the list. # # > **Note**: Be aware that all qubits in the list are included in the simulation, even if they are not operated on by the circuit. # # The mapping from the order of the qubits to the order of the amplitudes in the wave function can be tricky to understand. Basically, it is the same as the ordering used by `numpy.kron`. # # > If the wavefunction is the array # >> (0.1, 0.2, 0.3, 0.4), # # > then this is # >> 0.1|00โŸฉ + 0.2|01โŸฉ + 0.3|10โŸฉ + 0.4|11โŸฉ # # > in Dirac notation. If # >> qubit order = [a, b] # # > then |00> means qubit a is in 0 and qubit b is in 0, |01> means # > qubit a is 0 and qubit b is 1, etc. 
#
# Another way to think about the qubit-to-amplitude ordering is as "for loop ordering":
#
# ```
# for a in [0, 1]:
#     for b in [0, 1]:
#         print(a, b)
# ```
#
# The first index (the outermost loop) is the slowest to vary.

# + [markdown] id="YLpiz0aN1Jd6"
# ### Repetitions and histograms
#
# As mentioned, the simulator `run` methods also take an option for repeating the circuit, namely, the `repetitions` argument. If the measurements in the circuit are terminal and all other operations are unitary, this simulator is optimized to not recompute the wavefunction before sampling from the circuit.

# + id="QxkmBlo21lrQ"
"""Simulate a circuit using 1000 repetitions."""
# Get a circuit with terminal measurements to simulate.
circuit = cirq.Circuit(basic_circuit())

# Sample from the circuit 1000 times.
result = simulator.run(circuit, repetitions=1000)

# Get a histogram of measurement results.
print(result.histogram(key="a,b"))

# + [markdown] id="bD0zX0zP2HxQ"
# Here we have also demonstrated the use of the `histogram` method on the `result` which sums over all the different results for all of the different repetitions.
#
# The `histogram` method can also be given a `fold_func` argument, in order to group measurement results under some key before counting them up.
# For example, we can group by whether or not the two measurement results agreed:

# + id="rPqVUsD9snYf"
print(result.histogram(key="a,b", fold_func=lambda bits: "agree" if bits[0] == bits[1] else "disagree"))

# + [markdown] id="qFsytBIbOVD8"
# ## The Deutsch-Jozsa Algorithm
#
# The very first indication that quantum computers could be more powerful than classical computers was provided by David Deutsch in his 1985 paper
#
# > David Deutsch, "[Quantum Theory, the Church-Turing Principle and the Universal Quantum Computer](https://people.eecs.berkeley.edu/~christos/classics/Deutsch_quantum_theory.pdf)" *Proc. R. Soc. Lond.* A **400** 97–117. http://doi.org/10.1098/rspa.1985.0070
#
# This algorithm was extended by Deutsch and Richard Jozsa to a more convincing algorithmic separation and what is now called the Deutsch-Jozsa algorithm. In this section we will show how to write circuits for the Deutsch algorithm and then as an exercise in using Cirq for algorithms for a small version of the Deutsch-Jozsa algorithm.
#
# Let's begin with the Deutsch algorithm. In Deutsch's algorithm you are given access to a box which computes a one bit boolean function. That is, it is a box which takes in a bit and outputs a bit. If we want to be a mathematician or theoretical computer scientist we write the function $f$ as $f: \{0, 1\} \rightarrow \{0, 1\}$. There are exactly four such boolean functions which we can write out in a table
#
# | $x$ | $f_0$ | $f_1$ | $f_x$ | $f_{\bar{x}}$ |
# | --- | --- | --- | --- | --- |
# | 0 | 0 | 1 | 0 | 1
# | 1 | 0 | 1 | 1 | 0
#
# The first two of these are *constant* functions, $f_0$ and $f_1$. That is they always output a constant value (independent of the input). The other two $f_x$ and $f_\bar{x}$ are *balanced*. Over their inputs $0$ and $1$, they have an equal number of $0$s and $1$s in their truth table.
#
# We can now state Deutsch's problem:
#
# > Given access to a one bit input one bit output boolean function, determine by querying the function whether the function is *balanced* or *constant*.
#
# It shouldn't take you much to convince yourself that in order to solve this problem classically you need to call the function on both possible input values. The easiest way to see this is just to consider what happens if you query the function on one particular input and notice that, for either input, learning the value of the function does not separate the constant from balanced functions. In summary:
#
# *Classically one must query the binary function twice to distinguish the constant function from the balanced function.*

# + [markdown] id="UAec5ZBuSWYU"
# Now let's turn to the quantum approach to this problem. There is one bit of book keeping we need to take care of. Above we have described a classical function on bits that is not reversible. That is, knowing the values of the output does not allow us to determine uniquely the value of the input. In order to run this on a quantum computer, however, we need to make this computation reversible. A trick for taking a classical non-reversible function and making it "quantum happy" is to compute the value in an extra register and store the input. Suppose we have an $n$ bit input $x$ and we are computing a (potentially non-reversible) boolean function $f(x)$. Then we can implement this via a unitary $U_f$ that acts on $n + 1$ qubits as
#
# $$
# U_f |x\rangle |y\rangle = |x\rangle | y \oplus f(x)\rangle .
# $$
#
# Here $\oplus$ is addition modulo $2$ (XOR) and we have identified how $U_f$ acts by its action on all computational basis states $|x\rangle$ ($n$ input qubits) and $|y\rangle$ ($1$ output qubit). To see that this is reversible one can note that applying the transformation twice returns the state to its original form.
#
# Let's see how to implement these functions in Cirq.
#
# $f_0$ enacts the transform
# $$
# \begin{eqnarray}
# |00\rangle &\rightarrow& |00\rangle \\
# |01\rangle &\rightarrow& |01\rangle \\
# |10\rangle &\rightarrow& |10\rangle \\
# |11\rangle &\rightarrow& |11\rangle \\
# \end{eqnarray}
# $$
# Well this is just the identity transform, i.e. an empty circuit.
#
# $f_1$ enacts the transform
# $$
# \begin{eqnarray}
# |00\rangle &\rightarrow& |01\rangle \\
# |01\rangle &\rightarrow& |00\rangle \\
# |10\rangle &\rightarrow& |11\rangle \\
# |11\rangle &\rightarrow& |10\rangle \\
# \end{eqnarray}
# $$
# This is the `cirq.X` bit flip gate on the second qubit.
# # $f_x$ enacts the transform # $$ # \begin{eqnarray} # |00\rangle &\rightarrow& |00\rangle \\ # |01\rangle &\rightarrow& |01\rangle \\ # |10\rangle &\rightarrow& |11\rangle \\ # |11\rangle &\rightarrow& |10\rangle \\ # \end{eqnarray} # $$ # This is nothing more than a `cirq.CNOT` from the first bit to the second bit. # # Finally $f_\bar{x}$ enacts the transform # $$ # \begin{eqnarray} # |00\rangle &\rightarrow& |01\rangle \\ # |01\rangle &\rightarrow& |00\rangle \\ # |10\rangle &\rightarrow& |10\rangle \\ # |11\rangle &\rightarrow& |11\rangle \\ # \end{eqnarray} # $$ # which is a `cirq.CNOT` from the first bit to the second bit followed by a `cirq.X` on the second bit. # # We can encapulate these functions into a dictionary from a oracle name to the operations in the circuit needed to enact this function. # + id="YtWiBHonly69" """Store the operations to query each function in a dictionary.""" # Get qubits for the operations to act on. q0, q1 = cirq.LineQubit.range(2) # Define the dictionary of operations. The key of each dictionary entry # is the subscript of the function f in the above explanatory text. oracles = { '0': [], '1': [cirq.X(q1)], 'x': [cirq.CNOT(q0, q1)], 'notx': [cirq.CNOT(q0, q1), cirq.X(q1)] } # + [markdown] id="axCSYj3EmAEo" # We now turn to Deutch's algorithm. Suppose we are given access to the reversible oracle functions we have defined above. By a similar argument for our irreversible classical functions you can show that you cannot distinguish the balanced from the constant functions by using this oracle only once. But now we can ask the question: what if we are allowed to query this box in superposition, i.e. what if we can use the power of quantum computing? # # Deutsch was able to show that you could solve this problem now, with quantum computers, using only a single query. To see how this works we need two simple insights. 
#
# Suppose that we prepare the second qubit in the superposition state $|-\rangle=\frac{1}{\sqrt{2}}(|0\rangle-|1\rangle)$ and apply the oracle. Then we can check that
# $$
# U_f |x\rangle |-\rangle = U_f|x\rangle \frac{1}{\sqrt{2}}(|0\rangle -|1\rangle ) = |x\rangle \frac{1}{\sqrt{2}}(|f(x)\rangle -|f(x) \oplus 1\rangle ) = (-1)^{f(x)} |x\rangle |-\rangle .
# $$
# This is the so-called "phase kickback trick". By applying $U_f$ onto a target which is in superposition, the value of the function ends up showing up in the global phase.
#
# How can we leverage this to distinguish between the constant and balanced functions? Note that for the constant functions the phase that is applied is the same for all inputs $|x\rangle$, whereas for the balanced functions the phase is different for each value of $x$. In other words, if we use the phase kickback trick then for each of the oracles we apply the following transform on the first qubit:
#
# $$
# \begin{eqnarray}
# f_0 \rightarrow I, &&
# f_1 \rightarrow -I, &&
# f_x \rightarrow Z, &&
# f_\bar{x} \rightarrow -Z &&
# \end{eqnarray}
# $$
#
# Now we only need, on the first qubit, to distinguish between the identity gate and the $Z$ gate. But we can do this by recalling the identity
#
# $$
# H Z H = X
# $$
#
# where $H$ is the Hadamard gate.
#
# This means that we can turn a phase flip into a bit flip by applying Hadamards before and after the phase flip. If we look at the constant and balanced functions we see that this means that the constant functions will be proportional to $I$ and the balanced functions will be proportional to $X$. If we feed in $|0\rangle$ to this register, then in the first case we will only see $|0\rangle$ and in the second case we will only see $|1\rangle$. In other words we will be able to distinguish constant from balanced using a single query of the oracle.
#
# Let's code this up.
# + id="aMHzLxztj-gq" """Creating the circuit used in Deutsch's algorithm.""" def deutsch_algorithm(oracle): """Returns the circuit for Deutsch's algorithm given an input oracle, i.e., a sequence of operations to query a particular function. """ yield cirq.X(q1) yield cirq.H(q0), cirq.H(q1) yield oracle yield cirq.H(q0) yield cirq.measure(q0) for key, oracle in oracles.items(): print(f"Circuit for f_{key}:") print(cirq.Circuit(deutsch_algorithm(oracle)), end="\n\n") # + [markdown] id="Zy-2ysxzweyv" # Lets run these circuits a bunch of times to see that the measurement result ends up correctly distinguishing constant from balanced. # + id="ImffrBgJvLme" """Simulate each of the circuits.""" simulator = cirq.Simulator() for key, oracle in oracles.items(): result = simulator.run(cirq.Circuit(deutsch_algorithm(oracle)), repetitions=10) print('oracle: f_{:<4} results: {}'.format(key, result)) # + [markdown] id="845af7bfc7d6" # We interpret the simulation results as follows: # # - For the first two functions $f_0$ and $f_1$, we always measure $0$. Therefore, we know that these functions are constant. # - For the second two functions $f_x$ and $f_{\bar{x}}$, we always measure $1$. Therefore, we know that these functions are balanced. # + [markdown] id="5edb6fe11163" # ### Exercise: Two Bit Deutsch-Jozsa Algorithm # + [markdown] id="v4ka34Kuj4K0" # All boolean functions for one input bit are either constant or balanced. For boolean functions from two input bits not all functions are constant or balanced. There are two constant functions, $f(x_0, x_1) = 0$ and $f(x_0, x_1)=1$, while there are ${4 \choose 2} = 6$ balanced functions. The following code gives you the operations for these functions where we take two input qubits and compute the function in the third qubit. # + id="V5ZCXGCrxl4k" """Operations to query all possible functions on two bits. Two of these functions are constant, and six of these functions are balanced. """ # Define three qubits to use. 
q0, q1, q2 = cirq.LineQubit.range(3) # Define the operations to query each of the two constant functions. constant = ( [], [cirq.X(q2)] ) # Define the operations to query each of the six balanced functions. balanced = ( [cirq.CNOT(q0, q2)], [cirq.CNOT(q1, q2)], [cirq.CNOT(q0, q2), cirq.CNOT(q1, q2)], [cirq.CNOT(q0, q2), cirq.X(q2)], [cirq.CNOT(q1, q2), cirq.X(q2)], [cirq.CNOT(q0, q2), cirq.CNOT(q1, q2), cirq.X(q2)] ) # + [markdown] id="mr9ltXSJyB79" # An extension of Deutsch's orginal algorithm is the Deutsch-Jozsa algorithm, which can distinguish constant from balanced functions like these using a single query to the oracle. **The goal of this exercise** is to write a quantum circuit that can distinguish these. # + id="qJP_e68e1JBs" """Exercise: Write a quantum circuit that can distinguish constant from balanced functions on two bits. """ def your_circuit(oracle): # Your code here! yield oracle # Your code here! yield cirq.measure(q2) # + [markdown] id="3e75a276e239" # You can check your circuit by running the follow cell which simulates the circuit for all oracles. # + id="81da6ec6fc5a" """Check your answer by running this cell.""" simulator = cirq.Simulator() print("\nYour result on constant functions:") for oracle in constant: result = simulator.run(cirq.Circuit(your_circuit(oracle)), repetitions=10) print(result) print("\nYour result on balanced functions:") for oracle in balanced: result = simulator.run(cirq.Circuit(your_circuit(oracle)), repetitions=10) print(result) # + [markdown] id="L6vfs97CFZB2" # #### Solution # + cellView="both" id="mUvm9rmRFb4p" #@title def dj_circuit(oracle): # Phase kickback trick. yield cirq.X(q2), cirq.H(q2) # Get an equal superposition over input bits. yield cirq.H(q0), cirq.H(q1) # Query the function. yield oracle # Use interference to get result, put last qubit into |1>. yield cirq.H(q0), cirq.H(q1), cirq.H(q2) # Use a final OR gate to put result in final qubit. 
    yield cirq.X(q0), cirq.X(q1), cirq.CCX(q0, q1, q2)
    yield cirq.measure(q2)

# + [markdown] id="79348c9be8a9"
# As above, we can check the solution by running the circuit with each of the oracles.

# + id="c1b1e989dab2"
"""Simulate the Deutsch-Jozsa circuit and check the results."""
print("Result on constant functions:")
for oracle in constant:
    result = simulator.run(cirq.Circuit(dj_circuit(oracle)), repetitions=10)
    print(result)

print("\nResult on balanced functions:")
for oracle in balanced:
    result = simulator.run(cirq.Circuit(dj_circuit(oracle)), repetitions=10)
    print(result)

# + [markdown] id="2d62c4a01340"
# As with the single-bit case (Deutsch's algorithm), we always measure $0$ for constant functions and always measure $1$ for balanced functions.

# + [markdown] id="mmhbPIhT4YAI"
# ## Gates

# + [markdown] id="F5ubTic94X79"
# Cirq comes with a plethora of common gates. Here we show a few of them.

# + id="iIpoDaqK4yjV"
"""Examples of common gates defined in Cirq."""
# Get some qubits.
q0, q1, q2 = cirq.LineQubit.range(3)

# Get a bunch of common gates defined in Cirq.
ops = [
    cirq.X(q0),                # Pauli-X.
    cirq.Y(q1),                # Pauli-Y.
    cirq.Z(q2),                # Pauli-Z.
    cirq.CZ(q0,q1),            # Controlled-Z gate.
    cirq.CNOT(q1,q2),          # Controlled-X gate.
    cirq.H(q0),                # Hadamard gate.
    cirq.T(q1),                # T gate.
    cirq.S(q2),                # S gate.
    cirq.CCZ(q0, q1, q2),      # Controlled CZ gate.
    cirq.SWAP(q0, q1),         # Swap gate.
    cirq.CSWAP(q0, q1, q2),    # Controlled swap gate.
    cirq.CCX(q0, q1, q2),      # Toffoli (CCNOT) gate.
    cirq.ISWAP(q0, q1),        # ISWAP gate.
    cirq.rx(0.5 * np.pi)(q0),  # Rotation about X.
    cirq.ry(0.5 * np.pi)(q1),  # Rotation about Y.
    cirq.rz(0.5 * np.pi)(q2),  # Rotation about Z.
    cirq.X(q0) ** 0.5,         # Sqrt of NOT gate.
]

# Display a circuit with all of these operations.
print(cirq.Circuit(ops))

# + [markdown] id="FKRGtncT7R0v"
# For each of these gates, you can figure out how they act on the computational basis by calling `cirq.unitary` on the gate. For example, to see the unitary of `CNOT`, we can do:

# + id="7SUAT5F17afR"
"""Get the unitary of CNOT."""
print(cirq.unitary(cirq.CNOT))

# + [markdown] id="flm4CcqT2DoI"
# For single qubit gates, we have named gates like `cirq.H` for the Hadamard gate as well as the single qubit rotation gates defined as follows.
#
# $$
# {\tt cirq.rx(θ)}: \exp(-i \theta X) = \cos \theta I - i \sin \theta X =\left[ \begin{array}{cc} \cos \theta & -i \sin \theta \\ -i \sin \theta & \cos \theta\end{array} \right] \\
# $$
# $$
# {\tt cirq.ry(θ)}: \exp(-i \theta Y) = \cos \theta I - i \sin \theta Y =\left[ \begin{array}{cc} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta\end{array} \right] \\
# $$
# $$
# {\tt cirq.rz(θ)}: \exp(-i \theta Z) = \cos \theta I - i \sin \theta Z =\left[ \begin{array}{cc} e^{-i \theta} & 0 \\ 0 & e^{i \theta} \end{array} \right] \\
# $$
#
# In addition to `cirq.unitary` another important method (behind the scenes, anyways) is `cirq.apply_unitary`. This allows you to apply a unitary gate onto a state. Of course we could have applied the unitary directly to the state, using `cirq.unitary`. We'll see below in understanding how these methods are implemented that the `cirq.apply_unitary` can be used to apply the gate more directly onto the state and can save allocations of memory to store the unitary.
#
# If we apply `cirq.rx` to a state we can see how it rotates the state. To do this let us introduce a new simulate method `simulate_moment_steps`. This allows us to simulate the circuit `Moment` by `Moment`. At each point we can access the state. For example here we can use this to create a circuit that is a series of small `cirq.rx` rotations and plot the probability of measuring the state in the $|0\rangle$ state:

# + id="UgoNBN1H8B6h"
"""Plot the probability of measuring a qubit in the ground state."""
# Get a qubit.
a = cirq.NamedQubit('a')

# Get a circuit of a bunch of X rotations.
circuit = cirq.Circuit([cirq.rx(np.pi / 50.0)(a) for theta in range(200)]) # List to store probabilities of the ground state. probs = [] # Step through the simulation results. for step in simulator.simulate_moment_steps(circuit): prob = np.abs(step.state_vector()) ** 2 probs.append(prob[0]) # Plot the probability of the ground state at each simulation step. plt.style.use('seaborn-whitegrid') plt.plot(probs, 'o') plt.xlabel("Step") plt.ylabel("Probability of ground state"); # + [markdown] id="6q0YkXSZBvzd" # Above we have given ourselves direct access to the wave function and calculated the exact probabilities. Suppose we wanted to sample from the wave function at each point instead. # + id="iynhJEvoCIro" """Plot the probability of measuring a qubit in the ground state by sampling.""" # Number of times to sample. repetitions = 100 # List to store the probability of the ground state. sampled_probs = [] for i, step in enumerate(simulator.simulate_moment_steps(circuit)): samples = step.sample([a], repetitions=repetitions) prob = np.sum(samples, axis=0)[0] / repetitions sampled_probs.append(prob) # Plot the probability of the ground state at each simulation step. plt.style.use('seaborn-whitegrid') plt.plot(sampled_probs, 'o') plt.xlabel("Step") plt.ylabel("Probability of ground state"); # + [markdown] id="2483adccd339" # ## Custom gates # + [markdown] id="RACz4Z5GDMAf" # Supose there is a gate that you want Cirq to support, but it is not implemented in Cirq. How do you go about adding a new gate? # # Cirq tries to be Pythonic. One way in which it does this is that it relies on Python's **protocol** pattern. Protocols are similar to interfaces, in that they define a collection of methods that an object must support to implement a protocol, but different in that this requirement is more informal and not a part of a class or interface declaration. An object supports a protocol if it implements the methods that the protocol defines. 
You're probably familiar with this if you've ever done something like defined your own `Container` in Python. To do this for an object you simply define the `__contains__`, `__setitem__`, and `__getitem__` methods on your object, and then you can use this object anywere the Container protocol is supported. # # Let's see how this works for defining a custom gate. The gate we will define is a single qubit gate that has only rational amplitudes. This is based on the famous 3, 4, 5 triangle you may remember from a long ago math class: $3^2 + 4^2 = 5^2$. Using this observation we can construct normalized vectors and a unitary transform using the ratios of $3$, $4$, and $5$: # $$ # \zeta =\left[ \begin{array} # ~\frac{3}{5} & \frac{4}{5} \\ # -\frac{4}{5} & \frac{3}{5} # \end{array} \right] # $$ # # Below is a simple implementation of this gate in Cirq. To do this we simply define a class that inherits from `cirq.SingleQubitGate` and implements the `cirq.SupportsUnitary` protocol by implementing the `_unitary_(self)` method. We also define an optional `__str__` representation which Cirq will use when printing this gate out in a circuit diagram. # + id="Y2a7t2qmLDTb" """Example of defining a custom gate in Cirq.""" class RationalGate(cirq.SingleQubitGate): def _unitary_(self): return np.array([[3 / 5, 4 / 5], [-4 / 5, 3 / 5]]) def __str__(self): return 'ฮถ' # + [markdown] id="6d262b57bb0c" # We can now use this custom gate just like any other gate in Cirq. # + id="28f06d1baf9b" """Using the custom gate in a circuit.""" a = cirq.NamedQubit('a') rg = RationalGate() print(cirq.Circuit(rg(a))) # + [markdown] id="3132dcbe8413" # We can also get its unitary, as shown below, because the `RationalGate` defines a `_unitary_` method. # + id="x9dHKNfgMoyz" print(cirq.unitary(rg)) # + [markdown] id="xmF-uscmPP_G" # Let's check that we can use this gate in a simulation. 
# + id="_RXBrSQ8PWnu" """Simulate a circuit with a custom gate.""" circuit = cirq.Circuit(rg(a)) simulator = cirq.Simulator() result = simulator.simulate(circuit) print(result.final_state_vector) # + [markdown] id="9da706cd9038" # > *Note on simulating circuits with custom gates.* The `_unitary_` method is extremely inefficient for gates over many qubits. In most cases the method `_apply_unitary_` will be used instead, if it is available. # This method allows much more fine grained control on how a unitary is applied to a state, but it is harder to implement, for example because it is expected to use the pre-allocated workspace buffer that was given to it. # Almost all of the basic gates we have defined in Cirq have this method implemented. If you need to get performant, custom multi-qubit gates, you should implement a custom `_apply_unitary_` method for such gates. # + [markdown] id="y63aDja8R4rc" # ### Exercise: Custom Controlled Rx gate # # Recall that the `cirq.rx` gate is a rotation about the $X$ Pauli axis: # $$ # {\tt cirq.rx(ฮธ)}: \exp(-i \theta X) = cos \theta I - i \sin \theta X =\left[ \begin{array} ~\cos \theta & -i \sin \theta \\ -i \sin \theta & \cos \theta\end{array} \right] . \\ # $$ # # As an exercise, create a two-qubit controlled `cirq.rx` gate defined as follows: # $$ # {\tt CRx(\theta)}: # \left[\begin{array} # ~1 & 0 & 0 & 0 \\ # 0 & 1 & 0 & 0 \\ # 0 & 0 & \cos \theta & -i \sin \theta \\ # 0 & 0 & -i \sin \theta & \cos \theta # \end{array} \right] . # $$ # + id="9htgTzqAYHsA" """Define a custom controlled cirq.rx gate here.""" class CRx(cirq.TwoQubitGate): def __init__(self, theta): self.theta = theta def _unitary_(self): return np.array([ # Your code here! ]) # Print out its unitary. 
print(np.around(cirq.unitary(CRx(0.25 * np.pi)))) # + [markdown] id="7SoneHQRGeUW" # #### Solution # + id="XaG8n5bdGgf2" """Defining a custom controlled cirq.rx gate.""" class CRx(cirq.TwoQubitGate): def __init__(self, theta): self.theta = theta def _unitary_(self): return np.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(self.theta), -1j * np.sin(self.theta)], [0, 0, -1j * np.sin(self.theta), np.cos(self.theta)] ]) def _circuit_diagram_info_(self, args): return '@', 'rx({}ฯ€)'.format(self.theta / np.pi) # Print out its unitary. print(np.around(cirq.unitary(CRx(0.25 * np.pi)))) # + [markdown] id="af7add86ab34" # Note that we also define the `_circuit_diagram_info_` method which tells Cirq how to display the gate in a circuit diagram. The first string in the tuple is the symbol for the top wire, and the second string in the tuple is the symbol for the bottom wire. We can use this in a circuit to see the diagram info as shown below. # + id="a1cd089df7ba" """Display a circuit with the custom gate.""" # Get qubits. a = cirq.NamedQubit('a') b = cirq.NamedQubit('b') # Display the circuit. print('Circuit diagram:') print(cirq.Circuit(CRx(0.25 * np.pi)(a, b))) # + [markdown] id="9W_Vlgpzz0D1" # ### Gate decompositions # + [markdown] id="Uun4giev4N8F" # In many contexts, the notion of what gate you can apply is defined either by the physical hardware you are running or perhaps by the quantum error correcting code you are working with. In quantum computing we typically talk about **gate sets** and work with respect to a given gate set. Cirq supports gate decomposition via the `decompose` protocol. Below we show how one can construct a gate that can be decomposed into two gates. 
# + id="9G-9_29h09Mx" """Example of a custom gate which supports the decompose protocol.""" class HXGate(cirq.SingleQubitGate): def _decompose_(self, qubits): return cirq.H(*qubits), cirq.X(*qubits) def __str__(self): return 'HX' # + [markdown] id="1a88ad617867" # We can use this gate in a circuit as follows. # + id="370e8528c762" """Use the gate in a circuit.""" HX = HXGate() a = cirq.NamedQubit('a') circuit = cirq.Circuit(HX(a)) print(circuit) # + [markdown] id="eb4e74261590" # The symbol `HX` is a single gate, not a product of two gates. We can decompose the `HXGate` using `cirq.decompose` as shown below. # + id="47ec94cdecf3" """Decompose the gate.""" print(cirq.Circuit(cirq.decompose(circuit))) # + [markdown] id="RLF1narS66iB" # Note that this not only decomposed the `HX` gate into `H` and `X`, it also decomposed `H` into `Y**0.5` and `X`. In order to decompose only once, one can use `cirq.decompose_once`: # + id="AS-YMmAv6zUg" """Decompose the gate once.""" print(cirq.Circuit(cirq.decompose_once(HX(a)))) # + [markdown] id="sQ5N0UR26G_v" # When we define a gate, it is good practice to give a default decomposition in terms of common gates. However, often you will want to change this decomposition at run time for the specific hardware or context you are working in. To do this we can define an interceptor function which does this decomposition before falling back to the default. # + id="0sJ1uY6X7l3t" """Define a custom decomposer.""" def my_decompose(op): if isinstance(op, cirq.GateOperation) and isinstance(op.gate, HXGate): return cirq.Z(*op.qubits), cirq.H(*op.qubits) # Decompose the circuit according to this custom decomposer. cirq.Circuit(cirq.decompose(HX(a), intercepting_decomposer=my_decompose)) # + [markdown] id="OChSQ05Z06mL" # You can also define a predicate that says which gates to keep without decomposing further. 
This predicate should return `True` for all gates that should not be decomposed further, and `False` for all gates that should be decomposed further. # # > The default predicate is to only keep gates that cannot be decomposed. # + id="KQ2in0ol05S9" """Define a predicate of which gates to keep without decomposing.""" def keep_h_and_x(op): return isinstance(op, cirq.GateOperation) and op.gate in [cirq.H, cirq.X] # Decompose the HXGate using a custom predicate for which gates to not decompose. print(cirq.decompose(HX(a), keep=keep_h_and_x)) # + [markdown] id="e7486a8ea9d1" # In this case, we see that neither `H` nor `X` have been decomposed, as we have specified in `keep_h_and_x`. # + [markdown] id="3HtlMxa6QpVo" # ## Parameterized Circuits # # In addition to circuit gates with fixed values, Cirq also supports parameterized gates with symbolic values via `sympy`. These are placeholder values, such as `sympy.Symbol('x')`, that will only be resolved at *run-time*. For simulators these values are resolved by providing a `ParamResolver`. A `ParamResolver` provides a map from the `Symbol`'s name to its assigned value. # # > Plain Python dictionaries can also be used whenever a `ParamResolver` is needed. # + id="0afe36a32636" """Define a circuit with parameterized gates.""" # Import sympy for parameterized values. import sympy as sp # Get qubits to use in the circuit. a = cirq.NamedQubit("a") b = cirq.NamedQubit("b") # Define a parameterized value. val = sp.Symbol("s") # Create a circuit. circuit = cirq.Circuit(cirq.X.on(a) ** val, cirq.X.on(b) ** val) # Display it. print("Circuit with parameterized gates:\n") print(circuit) # + [markdown] id="c67ac3447a5f" # When we simulate this circuit, we must provide a `param_resolver` as mentioned. # + id="TIaVRzCD4deU" """Simulate the circuit at multiple parameter values.""" simulator = cirq.Simulator() # Simulate the circuit for several values of the parameter. 
for y in range(5): result = simulator.simulate(circuit, param_resolver={"s": y / 4.0}) print("s={}: {}\n".format(y, np.around(result.final_state_vector, 2))) # + [markdown] id="1_2i73Oo4aM1" # Here we see that the `Symbol` is used in two gates, and then the resolver provides this value at run time. # # Parameterized values are most useful in defining what we call a `Study`. A `Study` is a collection of trials, where each trial is a run with a particular set of configurations and which may be run repeatedly. Running a study returns a list of `TrialResult`s per set of fixed parameter values and repetitions. Example: # # + id="Gj_Y3Lrh49o9" """Simulate the circuit at multiple parameter values.""" # Get a list of param resolvers. resolvers = [cirq.ParamResolver({'s': y / 8.0}) for y in range(5)] # Add measurements to the circuit. circuit.append([cirq.measure(a), cirq.measure(b)]) # Simulate the circuit using run_sweep. results = simulator.run_sweep( program=circuit, params=resolvers, repetitions=10 ) for i, result in enumerate(results): print('params: {}\n{}\n'.format(result.params.param_dict, result)) # + [markdown] id="av09E7pH44YO" # Above we passed in a list of `ParamResolver`s to the `params` parameter of `run_sweep`. But one can also pass in a `Sweepable`. There are some useful methods for generating `Sweepable`s, for example to generate an equally spaced set of param resolvers one can use `Linspace` # # + id="zOymGxlb72Fk" """Alternative method of getting a sequence of param resolvers.""" linspace = cirq.Linspace(start=0, stop=1.0, length=11, key='x') for p in linspace: print(p) # + [markdown] id="r-CjbPwkRI_I" # ### Exercise: Rotate a qubit # # Let's do the equivalent of a Rabi-flop experiment. That is, let's apply a `XPowGate` rotating about the `X` axis for a linearly spaced set of values followed by a computational basis measurement. 
The end result should be a plot of the sampled fraction that were $|1\rangle$ as a function of gates of $X^t$ for $t$ between 0 and $1$ for 100 values of $t$ and each result sampled 100 times. # + id="8yW2e3sq9JM8" # Your code here! # + [markdown] id="930ee2edd71b" # ## Noise # + [markdown] id="FEM73JPALREa" # In addition to circuits with unitary gates, Cirq also has support for modeling noisy quantum evolutions. This is useful when modeling what will happen when running on actual hardware. # # Cirq currently supports noise that fits within the context of *operator sum representations* of noise (a.k.a quantum operations, quantum dyanamical maps, superoperators, etc). This formalism models the evolution of a density matrix via # # $$ # \rho \rightarrow \sum_k A_k \rho A_k^\dagger # $$ # # where the $A_k$ are *Kraus operators*. These operators are not necessarily unitary and satisfy the property # # $$ # \sum_k A_k^\dagger A_k = I . # $$ # # An example of a noise operator is the depolarizing channel on one qubit. This takes # # $$ # \rho \rightarrow (1-p) \rho + \frac{p}{3} (X \rho X + Y \rho Y + Z \rho Z) . # $$ # # In Cirq we can define such a channel and use it in a quantum circuit: # + id="YclVFbKZ0aD4" """Create a circuit with a depolarizing channel.""" circuit = cirq.Circuit(cirq.depolarize(0.2)(a), cirq.measure(a)) print(circuit) # + [markdown] id="CvteYd9s00t_" # Previously we saw that gates could implement that `_unitary_` protocol, and by doing so they could be used to perform wave function simulation. For noise the gates implement the `_channel_` protocol. Classes that implement this protocol return the Krauss operators on their `_channel_` method. Thus # + id="0ig_NSrS12PE" for i, kraus in enumerate(cirq.channel(cirq.depolarize(0.2))): print(f"Kraus operator {i} is:", kraus, sep="\n", end="\n\n") # + [markdown] id="4ee0055e4015" # The Kraus operators are often more conveniently represented in a Pauli basis. We can do this in Cirq as shown below. 
# + id="a2e5258ae33d" for i, krauss in enumerate(cirq.channel(cirq.depolarize(0.2))): pauli_ex = cirq.expand_matrix_in_orthogonal_basis(krauss, cirq.PAULI_BASIS) print(f"Kraus operator {i} is:", pauli_ex, sep="\n", end="\n\n") # + [markdown] id="spU2l26r0tQS" # In addition to the wavefunction simulator, Cirq also has a density matrix simulator. Instead of keeping track of the wavefunction, this simulator keeps track of the density matrix. It has the same `run` and `simulate` type methods. For example we can use this to simulate depolarizing channel and return the final density matrix of the system. # + id="skLIvXYq4yvX" """Example of simulating a noisy circuit with the density matrix simulator.""" # Circuit to simulate. circuit = cirq.Circuit(cirq.depolarize(0.2)(a)) print('Circuit:\n{}\n'.format(circuit)) # Get the density matrix simulator. simulator = cirq.DensityMatrixSimulator() # Simulate the circuit and get the final density matrix. matrix = simulator.simulate(circuit).final_density_matrix print('Final density matrix:\n{}'.format(matrix)) # + [markdown] id="Eudp7NYo51LT" # One thing to note is that the density matrix simulator simulates measurement statistically, and not as a channel where the outcome is not known. Consider the following example. # + id="_SjPRrIX5F4O" """Simulating a circuit with measurements using the DensityMatrixSimulator.""" # Get a circuit with measurements. circuit = cirq.Circuit(cirq.depolarize(0.5)(a), cirq.measure(a)) # Simulate with the density matrix multiple times. dmat1 = simulator.simulate(circuit).final_density_matrix dmat2 = simulator.simulate(circuit).final_density_matrix print(np.allclose(dmat1, dmat2)) # + [markdown] id="286ab4baf9fd" # Because the final density matrix is statistical due to the measurements, the output of the above cell will change when executed multiple times. 
# + [markdown] id="3qUM-Kpi8Iy4" # ### Monte carlo simulations # # Density matrix simulations are more expensive than pure state wave function simulations. However some channels allow an interpreation of randomly applying one of a fixed set of unitaries with differing probabilites. For example the depolarizing channel above can be interpretted as: # # - With probability $1-p$ apply the identity to the state, and # - with probability $p$ apply one of the three Pauli matrices $X$, $Y$, or $Z$ with equal probability. # # Channels that can be interpretted in this form can be simulating using a wavefunction simulator: when this channel is simulated the simulation will sample a unitary with the appropriate probability. # # For channels of these type, the channel can, instead of implementing the `_channel_` protocol, implement the `_mixture_` protocol: # + id="9Pt7o-Tq2SNz" """Use the cirq.mixture protocol on the cirq.depolarize channel.""" for p, u in cirq.mixture(cirq.depolarize(0.2)): print("prob = {}\nunitary: \n{}\n".format(p, u)) # + [markdown] id="z4FIsadc4Nmg" # In fact the depolarizing channel does not implement `_channel_`. Instead it only implements `_mixture_` and the `cirq.channel` method notices this and derives the channel from the mixture. # + id="HvhpBD334o1v" """Check if cirq.depolarize has _channel_ and _mixture_ methods.""" # Get a depolarizing channel. d = cirq.depolarize(0.2) # Check if it has _channel_ implemented. print('does cirq.depolarize(0.2) have _channel_? {}'.format('yes' if getattr(d, '_channel_', None) else 'no')) # Check if it has _mixture_ implemented. print('does cirq.depolarize(0.2) have _mixture_? 
{}'.format('yes' if getattr(d, '_mixture_', None) else 'no')) # + [markdown] id="OMCyapOd28h2" # When channels implement mixture then, as we said, we can use the wavefunction simulator: # + id="vDEhGG0v-UJy" """Use the wavefunction simulator on a channel that implements the mixture protocol.""" circuit = cirq.Circuit(cirq.depolarize(0.5).on(a), cirq.measure(a)) simulator = cirq.Simulator() result = simulator.run(circuit, repetitions=10) print(result) # + [markdown] id="a13d48d4836f" # Because the unitary Kraus operators are applied stochastically, executing the above cell multiple times will produce different outputs. # + [markdown] id="weWPzaPx8zly" # ### Adding noise to circuits and simulations # + [markdown] id="X-dT_CLu0fH9" # To add noise to circuits or during simulations, we provide the notion of a `NoiseModel`. A `NoiseModel` may add noise operation by operation, or it may add noise moment by moment, or it may add noise across a list of moments. # # For example we can define a noise model that add a single qubit depolarizing for every qubit in each moment. # + id="PfRP7K598wNQ" """Adding noise to a circuit.""" # Get a noiseless circuit. noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.2)) circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.measure(a, b)) print('Circuit with no noise:\n{}\n'.format(circuit)) # Add noise to the circuit. system_qubits = sorted(circuit.all_qubits()) noisy_circuit = cirq.Circuit() for moment in circuit: noisy_circuit.append(noise.noisy_moment(moment, system_qubits)) print('Circuit with noise:\n{}'.format(noisy_circuit)) # + [markdown] id="e4946d8f236f" # We can also pass a noise model into the `cirq.DensityMatrixSimulator` and execute a noisy circuit in this manner. # + id="uzxaFCGIz2aQ" """Perform noisy simulation by defining a density matrix simulator with a noise model.""" # Define a noise model. noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.2)) # Pass this noise model into the simulator. 
simulator = cirq.DensityMatrixSimulator(noise=noise) # Get a circuit to simulate. circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.measure(a, b)) # Simulate the circuit in steps. for i, step in enumerate(simulator.simulate_moment_steps(circuit)): print('After step {} state was\n{}\n'.format(i, step.density_matrix())) # + [markdown] id="6308948de99a" # ## Devices # + [markdown] id="PvJCA3e0QsuI" # NISQ algorithms work in a regime where every gate counts. A key philosophy behind Cirq is that we believe the details of the hardware, the performance characteristics, as well as device constraints, will be key to getting the most out of NISQ algorithms. Towards this end these hardware features are contained in the `Device` class. # # For example, here is Google's Bristleconde device which we printed out at the start of this notebook. # + id="BmzxGpDB9jJ4" print(cirq.google.Bristlecone) # + [markdown] id="GGejgRf0AGni" # In a future version, we intend for each `Device` to define a noise model. # # `Device`s also contain more information about the timing of the device. For example here we can calculate the duration of an `X` on the `Bristlecone` device. # + id="HAwdWkprAPXN" """Get the duration of an operation.""" brissy = cirq.google.Bristlecone op = cirq.X.on(cirq.GridQubit(5, 5)) print(brissy.duration_of(op)) # + [markdown] id="IxnyURhnAdph" # Another property of devices is that they can be used to enforce constraints from the hardware, both checking that these constraints are satisfied, but also enforcing the constraints on the device. For example, on the `Bristlecone` device, a two-qubit gate has the property that one cannot simultaneously perform a pair of two-qubit gates that act on adjacent qubits. So for example if we create such a `Circuit` and validate it using the device, we will see an error. # + id="r5F4FUtmA5kW" """Validate operations on a device.""" # Get adjacent qubits on the Bristlecone device. 
q55 = cirq.GridQubit(5, 5) q56 = cirq.GridQubit(5, 6) q66 = cirq.GridQubit(6, 6) q67 = cirq.GridQubit(6, 7) # Define operations on adjacent qubits. ops = [cirq.CZ(q55, q56), cirq.CZ(q66, q67)] circuit = cirq.Circuit(ops) print(circuit) # cirq.google.Bristlecone.validate_circuit(circuit) # (this should throw an error) # + [markdown] id="XkNPbeFDBTL4" # But more interestingly we could have passed the device into the `Circuit` and it will perform the creation of the circuit (using the insertion semantics as described above) such that the device cannot violate the constraints. # + id="5BOBUIEIBeQ5" """Create a circuit for a particular device.""" # Same adjacent operations as above. ops = [cirq.CZ(q55, q56), cirq.CZ(q66, q67)] # Create a circuit on the Bristlecone device. circuit = cirq.Circuit(device=cirq.google.Bristlecone) # When we append operations now, they are put into different moments. circuit.append(ops) print(circuit) # + [markdown] id="lyFcloaARXg4" # ### Exercise: Make a Device # # Construct a device that acts on a square sized lattice, and only allows Hadamard, CZ, and measurement gates. # + id="zDE-19I_a3on" # Your code here! # + [markdown] id="ed20ea519cc9" # ## Compiling / Optimizing # + [markdown] id="J9ia4eatUQ_x" # Cirq's philosophy for what are often called compilation steps is slightly non-traditional. In particular we believe that NISQ devices perform so few gates that many compiliations will be one or a few passes. A useful class of optimization code for this is the `PointOptimizer`. For example, here is a `PointOptimizer` that recognizes that a `X` gate followed by a `Z` gate is equivalent to a `Y` gate # + id="l7eFMVe1GEe2" """Example of defining a custom cirq.PointOptimizer.""" class XZOptimizer(cirq.PointOptimizer): """Replaces an X followed by a Z with a Y.""" def optimization_at(self, circuit, index, op): # Is the gate an X gate? 
if isinstance(op, cirq.GateOperation) and (op.gate == cirq.X): next_op_index = circuit.next_moment_operating_on(op.qubits, index + 1) qubit = op.qubits[0] if next_op_index is not None: next_op = circuit.operation_at(qubit, next_op_index) if isinstance(next_op, cirq.GateOperation) and (next_op.gate == cirq.Z): new_op = cirq.Y.on(qubit) return cirq.PointOptimizationSummary( clear_span = next_op_index - index + 1, clear_qubits=op.qubits, new_operations=[new_op]) opt = XZOptimizer() circuit = cirq.Circuit(cirq.X(a), cirq.Z(a), cirq.CZ(a, b), cirq.X(a)) print("Before optimizing:\n{}\n". format(circuit)) opt.optimize_circuit(circuit) print("After optimizing:\n{}".format(circuit)) # + [markdown] id="--aUfkiaUb3S" # ### Exercise: Simplify flipped CNOTs # # Write an PointOptimizer that performs (greedily) the simplification that # # ``` # a: โ”€โ”€โ”€Hโ”€โ”€โ”€@โ”€โ”€โ”€Hโ”€โ”€โ”€ # โ”‚ # b: โ”€โ”€โ”€Hโ”€โ”€โ”€Xโ”€โ”€โ”€Hโ”€โ”€โ”€ # ``` # is equal to # # # ``` # a: โ”€โ”€โ”€Xโ”€โ”€โ”€ # โ”‚ # b: โ”€โ”€โ”€@โ”€โ”€โ”€ # ``` # # + id="S0PThmctKFxl" # Your code here! # + id="75D9xMroKNDG" """Test your optimizer on this circuit.""" circuit = cirq.Circuit( cirq.H.on_each(a, b, c), cirq.CNOT(a, b), cirq.H.on_each(a, b), cirq.CZ(a, b) ) # Instantiate your optimizer # my_opt = # And check that it worked. # print(my_opt.optimizer_circuit(circuit)) # + [markdown] id="82c692df49dd" # ## Google's Xmon Gates # + [markdown] id="gM1ZW7TstvK9" # We built Cirq at Google because we thought that it was important that hardware details flow up through the level of abstractions in a framework for NISQ algorithms. And we also built it because Google has a class of superconducting qubit devices where we want to explore NISQ algorithms. # # What sort of gates does the xmon architecture support? 
At the hardware level the basic gate set is rotations about the Pauli $Z$ axis # # $$ # \exp(-iZt) = \left[ # \begin{matrix} # e^{it} & 0 \\ # 0 & e^{-it} # \end{matrix} # \right] # $$ # # Rotations about any axis in the Pauli $X$ and $Y$ plane: # # $$ # \exp(-i(\cos \theta X + \sin \theta Y) t) = # \left[ # \begin{matrix} # \cos t & -i\sin t e^{-i \theta} \\ # -i\sin t e^{i \theta} & \cos t # \end{matrix} # \right] # $$ # # The two qubit gate is a phase about the $|11\rangle$ state: # # $$ # \exp(-it |11\rangle \langle 11|) = # \left[ # \begin{matrix} # 1 & 0 & 0 & 0 \\ # 0 & 1 & 0 & 0 \\ # 0 & 0 & 1 & 0 \\ # 0 & 0 & 0 & \exp(-it) # \end{matrix} # \right] # $$ # # And finally measurements in the computational basis. # # Many of the common gates in Cirq are directly accessible on this hardware. To check whether the gate you are using is in the xmon gate set, one can use the `is_supported_operation` as shown below. # + id="feb179abdf97" """Check if an operation is supported by the XMON gate set.""" # Get the XMON gate set. xmon = cirq.google.gate_sets.XMON # Check if X is supported. xmon.is_supported_operation(cirq.X.on(cirq.NamedQubit("a"))) # + [markdown] id="d6031d18fecd" # Thus, `cirq.X` is supported by the xmon gate set. As mentioned above, `cirq.CNOT` is not supported, so the following code block should return `False`. # + id="d168c7619994" """Check if CNOT is supported.""" xmon.is_supported_operation(cirq.CNOT.on(cirq.NamedQubit('a'), cirq.NamedQubit('b'))) # + [markdown] id="4FEVSq1T2n2J" # When a gate is not a native xmon gate, Cirq can often convert it to an xmon native gate. # + id="AdqDGjqL2lI2" """Convert a gate to xmon gates.""" # Get a converter. converter = cirq.google.ConvertToXmonGates() # Do the conversion. 
converted = converter.convert(cirq.CNOT.on(cirq.NamedQubit("a"), cirq.NamedQubit("b"))) print(cirq.Circuit(converted)) # + [markdown] id="iR8zA_uY4HRZ" # This isn't very optimized because what it has done is first rely on the decompose of `CNOT` and then decomposed each of these into native xmon gates. There are many tricks that one can use to simplify xmon gates. To apply many of these one can use the `cirq.google.optimize_from_xmon` method: # + id="ePc0hrEU2_yy" """Optimize a circuit for the xmon gate set.""" circuit = cirq.Circuit( cirq.CNOT.on(cirq.NamedQubit("a"), cirq.NamedQubit("b")) ) print(cirq.google.optimized_for_xmon(circuit)) # + [markdown] id="lImnJ5Co4Y1n" # Because xmon gates can be executed on Google hardware, they will need to be transmitted as machine code to the quantum computer. This means that they have a serialized form. We use protobuffers as the serialization. To see what this form looks like we can do the following. # + id="od6ofvow4EoD" """Serialize an operation.""" xmon.serialize_op(cirq.X.on(cirq.GridQubit(5, 5))) # + [markdown] id="b13526b78348" # ## Other interesting things in Cirq # + [markdown] id="tMC-nLE474nH" # **Experiments**. The `cirq.experiments` package can perform and plot the results of some basic experiments for understanding how well a system is performing. # # + id="Ih8YgwX19h2-" result = cirq.experiments.rabi_oscillations( sampler=cirq.Simulator(), # In the future, sampler could point at real hardware. qubit=cirq.LineQubit(0) ) result.plot(); # + [markdown] id="38c8tWcX90Zn" # **Testing**. The `cirq.testing` package has useful debugging and testing methods like `cirq.testing.assert_implements_consistent_protocols` and `cirq.testing.assert_allclose_up_to_global_phase`. # # + id="j7FoZGKv90qe" class InconsistentXGate(cirq.SingleQubitGate): def _decompose_(self, qubits): yield cirq.H(qubits[0]) yield cirq.Z(qubits[0]) yield cirq.H(qubits[0]) def _unitary_(self): return np.array([[0, -1j], [1j, 0]]) # Oops! Y instead of X! 
# cirq.testing.assert_decompose_is_consistent_with_unitary(InconsistentXGate()) # + [markdown] id="iYqy0qwU9sZy" # **Export**. You can export a circuit as Qasm. # + id="qH7xB-vZ-Jsn" """Export a circuit to Qasm.""" a, b, c = cirq.LineQubit.range(3) circuit = cirq.Circuit(cirq.H(a), cirq.H(c), cirq.CNOT(a, b), cirq.CCZ(a, b, c)) print(circuit.to_qasm()) # + [markdown] id="35b3a411ffae" # You can also export a circuit as QUIL: # + id="951a57e8e0fd" """Export a circuit to QUIL.""" print(circuit.to_quil()) # + [markdown] id="QlgFtRQs-TfQ" # You can also turn a circuit into a link to the drag-and-drop web simulation Quirk (though somewhat inconveniently). # + id="Ydst5b0S9IGE" """Export a circuit to a Quirk URL.""" from cirq.contrib.quirk.export_to_quirk import circuit_to_quirk_url print(circuit_to_quirk_url(circuit))
docs/tutorials/educators/intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dark current: the ideal case # + import numpy as np from scipy import stats # %matplotlib inline from matplotlib import pyplot as plt from image_sim import dark_current, read_noise # - # ## A dark frame measures dark current # # Recall that *dark current* refers to counts (electrons) generated in a pixel because an electron in the pixel happens to have enough energy to "break free" and register as a count. The distribution of electron thermal energies in pixel follows a [Maxwell-Boltzmann distribution](https://en.wikipedia.org/wiki/Maxwell%E2%80%93Boltzmann_distribution) in which most electrons have energy around $kT$, where $T$ is the temperature of the sensor and $k$ is the Boltzmann constant. There is a distribution of energies, though, and occasionally an electron will be high enough energy to jump to the conducting band in the chip, registering the same as an electron excited by a photon. Since the Maxwell-Boltzmann distribution depends on temperature the rate at which dark current appears in a pixel is also expected to depend on temperature. # # A *dark frame* (also called a *dark image*) is an image taken with your camera with the shutter closed. It is the sum of the bias level of your camera, the readout noise, and the dark current. # # You measure the dark current in your camera by taking dark frames. # ## Dark current theory # # The expected signal in a dark frame exposure of time $t$ is proportional to $t$. If we call the dark electrons in an exposure $d_e(t)$ and the dark current $d_c(T)$, where $T$ is the temperature, then # # $$ # d_e(t) = d_c(T) t. # $$ # # For liquid-cooled cameras, particularly ones cooled bu liquid nitrogen, the operating temperature doesn't change. 
For thermo-electrically cooled cameras one is able to set the desired operating temperature. As a result, you should be able to ignore the temperature dependence of the dark current. # # The thermo-electric coolers can usually cool by some fixed amount below the ambient temperature. Though in principle one could choose to always cool by the same fixed amount, like $50^\circ$C below the ambient temperature, there is an advantage to always running your camera at the same temperature: dark frames taken on one date are potentially useful on another date. If the operating temperature varies then you need to make sure to take dark frames every time you observe unless you carefully characterize the temperature dependence of your dark current. # # It will turn out that for practical reasons -- not all pixels in your camera have the same dark current -- it is usually best to take dark frames every time you observe anyway. # ### Illustration with dark current only, no read noise # # For the purposes of illustrating some of the properties of dark current and dark frames we'll generated some simulated images in which the counts are due to dark current alone. We'll use these values: # # + Dark current is $d_c(T) = 0.1 e^-$/pixel/sec # + Gain is $g = 1.5 e^-$/ADU # + Read noise is 0 $e^-$ dark_rate = 0.1 gain = 1.5 read_noise_electrons = 0 # #### Dark current is a random process # # The dark counts in a dark frame are counts and so they follow a Poisson distribution. The plot below shows the dark current in a number of randomly chosen pixels in 20 different simulated images each with exposure time 100 sec. Note that the counts vary from image to image but that the average is very close to the expected value. # # The expected value of the dark counts for this image are $d_e(t)/g = 6.67~$counts. 
# +
# Simulation parameters shared by the rest of the notebook.
exposure = 100      # seconds
n_images = 20
n_pixels = 10
image_size = 500

pixels = np.random.randint(50, high=190, size=n_pixels)
pixel_values = np.zeros(n_images)
pixel_averages = np.zeros(n_images)
base_image = np.zeros([image_size, image_size])

plt.figure(figsize=(20, 10))

# For each sampled pixel, track its dark counts across n_images independent
# simulated dark frames, then overlay the average of all sampled pixels.
for pixel in pixels:
    for n in range(n_images):
        a_dark = dark_current(base_image, dark_rate, exposure, gain=gain, hot_pixels=False)
        pixel_values[n] = a_dark[pixel, pixel]
    plt.plot(pixel_values, label='pixel [{0}, {0}]'.format(pixel), alpha=0.5)
    pixel_averages += pixel_values

plt.plot(pixel_averages / n_pixels, linewidth=3,
         label='Average over {} pixels'.format(n_pixels))
# plt.xlim(0, n_images - 1)
plt.hlines(dark_rate * exposure / gain, *plt.xlim(),
           linewidth=3, label="Expected counts")
plt.xlabel('Image number')
plt.ylabel('Counts due to dark current')
plt.legend()
plt.grid()
# -

# #### The distribution of dark counts follows a Poisson distribution
#
# The distribution below shows a normalized histogram of number of pixels as a function of dark counts in each pixel for one of the simulated dark frames. Overlaid on the histogram is a Poisson distribution with a mean of $d_e(t_{exp}) = d_C(T) * t_{exp} / g$, where $t_{exp}$ is the exposure time.

# +
plt.figure(figsize=(20, 10))

# Histogram of the last simulated frame vs. the theoretical Poisson pmf.
plt.hist(a_dark.flatten(), bins=20, align='mid', density=True,
         label="Histogram of dark current counts")
pois = stats.poisson(dark_rate * exposure / gain)
pois_x = np.arange(0, 20, 1)
plt.plot(pois_x, pois.pmf(pois_x),
         label="Poisson distribution, mean of {:5.2f} counts".format(dark_rate * exposure / gain))
plt.xlabel("Dark counts in {} sec exposure".format(exposure))
plt.ylabel("Number of pixels (area normalized to 1)")
plt.legend()
plt.grid()
# -

# ### Illustration with dark current *and* read noise
#
# Now let's run through the same couple of plots with a non-zero read noise. For the sake of illustration, we'll look at two cases:
#
# 1.
#    Moderate read noise of 10 $e^-$ per read, typical of a low-end research-grade CCD
# 2. Low read noise of 1 $e^-$ per read
#
# In both cases we'll continue with the parameters above to generate our frames:
#
# + Dark current is $d_c(T) = 0.1 e^-$/pixel/sec
# + Gain is $g = 1.5 e^-$/ADU
# + Exposure time 100 sec
#
# With those choices the expected dark count is 6.67 count, which is 10 $e^-$. That is, not coincidentally, one of the values for read noise that was chosen.

# ### Read noise is about the same as the expected dark current
#
# In this first case, the read noise and the dark current are both 10$e^-$.

high_read_noise = 10

# +
# Simulate n_images dark frames that now also include read noise, and plot
# the same per-pixel traces as in the no-read-noise case above.
pixels = np.random.randint(50, high=190, size=n_pixels)
pixel_values = np.zeros(n_images)
pixel_averages = np.zeros(n_images)
base_image = np.zeros([image_size, image_size])
darks = np.zeros([n_images, image_size, image_size])

plt.figure(figsize=(20, 10))

for n in range(n_images):
    darks[n] = dark_current(base_image, dark_rate, exposure, gain=gain, hot_pixels=False)
    darks[n] = darks[n] + read_noise(base_image, high_read_noise, gain=gain)

for pixel in pixels:
    for n in range(n_images):
        pixel_values[n] = darks[n, pixel, pixel]
    plt.plot(pixel_values, label='pixel [{0}, {0}]'.format(pixel), alpha=0.5)
    pixel_averages += pixel_values

image_average = darks.mean(axis=0)

plt.plot(pixel_averages / n_pixels, linewidth=3,
         label='Average over {} pixels'.format(n_pixels))
# plt.xlim(0, n_images - 1)
plt.hlines(dark_rate * exposure / gain, *plt.xlim(),
           linewidth=3, label="Expected counts")
plt.xlabel('Image number')
plt.ylabel('Counts due to dark current')
plt.legend()
plt.grid()
# -


def plot_dark_with_distributions(image, rn, dark_rate, n_images=1,
                                 show_poisson=True, show_gaussian=True):
    """Plot a histogram of dark counts with reference distributions overlaid.

    Parameters
    ----------
    image : numpy.ndarray
        A dark frame (or an average of ``n_images`` dark frames), in counts.
    rn : float
        Read noise of the frame, in electrons.
    dark_rate : float
        Dark current, in electrons/pixel/sec.
    n_images : int, optional
        Number of frames averaged to produce ``image``; scales the Poisson
        reference curve.
    show_poisson, show_gaussian : bool, optional
        Whether to overlay the Poisson (dark current) and Gaussian
        (read noise) reference curves.

    NOTE(review): relies on the module-level ``exposure`` and ``gain``
    defined in earlier cells -- call only after those cells have run.
    """
    plt.hist(image.flatten(), bins=20, align='mid', density=True,
             label="Dark current counts")
    expected_mean_dark = dark_rate * exposure / gain

    if show_poisson:
        pois = stats.poisson(expected_mean_dark * n_images)
        pois_x = np.arange(0, 300, 1)
        # Renormalize so the curve for the *average* of n_images frames
        # integrates to 1 on the counts/n_images axis.
        new_area = np.sum(1 / n_images * pois.pmf(pois_x))
        plt.plot(pois_x / n_images, pois.pmf(pois_x) / new_area,
                 label="Poisson distribution, mean of {:5.2f} counts".format(expected_mean_dark))

    plt.xlim(-20, 30)

    if show_gaussian:
        gauss = stats.norm(loc=expected_mean_dark, scale=rn / gain)
        gauss_x = np.linspace(*plt.xlim(), num=10000)
        plt.plot(gauss_x, gauss.pdf(gauss_x),
                 label='Gaussian, standard dev is read noise in counts')

    plt.xlabel("Dark counts in {} sec exposure".format(exposure))
    plt.ylabel("Fraction of pixels (area normalized to 1)")
    plt.legend()


# +
plt.figure(figsize=(20, 10))

plot_dark_with_distributions(darks[-1], high_read_noise, dark_rate, n_images=1)

plt.ylim(0, 0.8)
plt.grid()
# -

# #### This dark frame measures noise, not dark current
#
# The pixel distribution is clearly a Gaussian distribution with a width determined by the read noise, not the underlying Poisson distribution that a dark frame is trying to measure. The only way around this (assuming the dark current is large enough that it needs to be subtracted at all) is to make the exposure long enough that the expected dark counts exceed the read noise.
#
# We explore that case below by adding in a much smaller amount of noise.

# +
plt.figure(figsize=(20, 10))

plot_dark_with_distributions(image_average, high_read_noise, dark_rate, n_images=n_images)

plt.ylim(0, 0.8)
plt.grid()
# -

# # OOF! WHY IS THIS NOT A POISSON DISTRIBUTION?
#
# ## Maybe the average of a bunch of Poisson distributions is not a Poisson distribution and is instead a Gaussian?
# Nope, not a gaussian either, but not a Poisson. Note below that the *sum* is a Poisson with mean value `n_images` times larger than the single-image value. To scale to the average, calculate the Poisson distribution with mean value $N_{images} d_C(t)$, plot that as a function of `counts/n_images`, and normalize the resulting distribution.
#
# ##### Also, it *is* the expected distribution for a sum of Poissons IF the read noise is zero or small.
# ### Plot below shows properly calculated Poisson and Gaussian distributions for sum of each type.

# +
plt.figure(figsize=(20, 10))

# Histogram of the *sum* of the n_images dark frames (the average scaled
# back up), compared with the correctly scaled reference distributions.
plt.hist((n_images * image_average).flatten(), bins=20, align='mid', density=True,
         label="Histogram of dark current counts")
expected_mean_dark = dark_rate * exposure / gain

# The sum of n_images Poisson variables is Poisson with n_images times the
# single-image mean; the label now reports the mean actually plotted.
pois = stats.poisson(expected_mean_dark * n_images)
pois_x = np.arange(0, 300, 1)
plt.plot(pois_x, pois.pmf(pois_x),
         label="Poisson distribution, mean of {:5.2f} counts".format(expected_mean_dark * n_images))

# Read noise grows as sqrt(n_images) when independent frames are summed.
gauss = stats.norm(loc=expected_mean_dark * n_images,
                   scale=high_read_noise / gain * np.sqrt(n_images))
gauss_x = np.linspace(*plt.xlim())
plt.plot(gauss_x, gauss.pdf(gauss_x),
         label='Gaussian, standard dev is read noise in counts')
plt.xlabel("Dark counts in {} sec exposure".format(exposure))
plt.ylabel("Number of pixels (area normalized to 1)")
plt.legend()
plt.grid()
# -

# ### Read noise much lower than dark current
#
# In this case the read noise is 1 $e^-$, lower than the expected dark current for this exposure time, 10$e^-$.
low_read_noise = 1 # + pixels = np.random.randint(50, high=190, size=n_pixels) pixel_values = np.zeros(n_images) pixel_averages = np.zeros(n_images) base_image = np.zeros([image_size, image_size]) darks = np.zeros([n_images, image_size, image_size]) plt.figure(figsize=(20, 10)) for n in range(n_images): darks[n] = dark_current(base_image, dark_rate, exposure, gain=gain, hot_pixels=False) darks[n] = darks[n] + read_noise(base_image, low_read_noise, gain=gain) for pixel in pixels: for n in range(n_images): pixel_values[n] = darks[n, pixel, pixel] plt.plot(pixel_values, label='pixel [{0}, {0}]'.format(pixel), alpha=0.5) pixel_averages += pixel_values image_average = darks.mean(axis=0) plt.plot(pixel_averages / n_pixels, linewidth=3, label='Average over {} pixels'.format(n_pixels)) # plt.xlim(0, n_images - 1) plt.hlines(dark_rate * exposure / gain, *plt.xlim(), linewidth=3, label="Expected counts") plt.xlabel('Image number') plt.ylabel('Counts due to dark current') plt.legend() plt.grid() # + plt.figure(figsize=(20, 10)) plot_dark_with_distributions(darks[-1], low_read_noise, dark_rate, n_images=1) plt.ylim(0, 0.8) plt.grid() # -
notebooks/03.01-Dark-current-The-ideal-case.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # from https://randomastronomy.wordpress.com/2014/07/14/a-python-with-a-long-memory-fitting-1f-noise-with-pymc/ # + import datetime import pymc import numpy as np import spacepy.plot as spp # for the style import matplotlib.pyplot as plt import spacepy.toolbox as tb import spacepy.plot as spp # %matplotlib inline datetime.datetime.now() # - # randomastronomy # Astronomy, physics, computers and probability โ€” all here! # # Search # Search # Main menu # Skip to primary content # Home # About # Post navigationโ† PreviousNext โ†’ # A python with a long memory: fitting 1/f noise with PyMC # Posted on July 14, 2014 # Introduction # # Since ancient times, astronomy has been observing long-memory processes (i.e., short frequency noise) all over the place. The problem has been a tough one for lots of people working on time-series, specially for the ones involved in exoplanets, as the lightcurves (which show an apparent decrease in the observed flux of the star as the result of the planet eclipsing the star) usually show signs of red noise which canโ€™t be neglected, as they affect the physical properties of the planet and the star derived from these measurements (Pont, Zucker & Queloz, 2006). # # A special case of these kinds of long-memory processes, flicker (or 1/f, or pink) noise, has attracted the attention of the community in general for more than forty years (see, e.g., Press, 1978) but, until recent years, little work has been done to account for that kind of noise, despite the fact that approximate solutions for the problem have been around since almost twenty years (Wornell, 1995). 
However, a few years ago, Carter & Winn (2009) published a very interesting paper in which they derive and implement a very interesting methodology following the approaches given in Wornell (1995), in which a wavelet-based approach is used to transform the time series. The trick is that in wavelet space, 1/f processes have nearly diagonal covariance matrices, so the problem is extremely simplified when wavelet-transforming the time-series. The implementation assumes a model of the form:
#
# f(t,\theta) + \epsilon_{\textnormal{1/f}}(t)+\epsilon_{\textnormal{wn}}(t),
#
# where the first term is a deterministic model with parameters \theta, the second term is a zero mean 1/f stochastic process (which is defined by only one parameter, \sigma_r, which defines its amplitude but is not the square root of its variance), implying its Power Spectral Density is proportional to 1/f and the third term is a zero mean white noise process, here assumed to be gaussian (and hence with only one parameter defining it, \sigma_w, the square root of its variance). Furthermore, the model assumes the time-sampling is uniform.
#
# The work of Carter & Winn (2009) was implemented in a code called Transit Analysis Package (TAP; Gazack et al., 2012), which was coded in Interactive Data Language (IDL), a very nice coding platform/language, but also a very expensive one for a poor graduate student like myself1. So I decided to make my own implementation in Python of the work of Carter & Winn (2009), but using PyMC, a really amazing python module that implements Bayesian statistical models and fitting algorithms, which includes Markov chain Monte Carlo. In this blog post I want to make a little tutorial on how to use this implementation; you can download all the codes for your research in my personal webpage [here]. If you use it, please, cite the work of Carter & Winn (2009) and acknowledge the hard work I have put in this implementation.
# # Installing the package # # To install this package, download the code from my the package repository, and unpack it to a folder. Inside you will find a install.py file that you have to run, and this will create a file called FWT.so inside the WaveletCode folder; this is a Discrete Wavelet Transform and Inverse Wavelet Transform implementation in C that I wrote, based on the algorithm in the Numerical Recipes. The coding in C is done mainly because it is fast, and it can be called from Python as you can check on the Wavelets.py code in that same folder. # # After the above is done you are done! From now on, I will assume you are working on the same directory as the FlickerLikelihood.py file that we will use (which is on the same folder as the install.py file). Letโ€™s make a little experiment now! # # A simple problem: fitting a line with 1/f + white gaussian noise # # Letโ€™s solve the simple problem of fitting a line of the form f(t)=at+b which has both additive white and 1/f noise. In the following plot, I have created 1/f noise following the method of <NAME>, with \beta=1 (in red), and I added white gaussian noise in order to make the problem even more challenging (black dots): # # noise_figure # # The next step is to add a model to that noise. I added a model of the form f(t)=at+b, with a=0.5 and b=3, which is plotted in the following figure (you can download the dataset from here to run the codes showed here): # # dataset_figure # # And now comes the code. 
Using the FlickerLikelihood.py file contained in the package, which calculates the likelihood derived by <NAME> (2009) with the function get_likelihood, which takes as inputs (1) the residuals, (2) the standard deviation of the white noise, \sigma_w and (3) a parameter that defines the amplitude of the flicker noise model, \sigma_r, one can create a likelihood using our model with PyMC, which I called LineFlickerModel.py: # # LineFlickerModel.py # 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 11 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # 19 # 20 # 21 # 22 # 23 # 24 # 25 # 26 # 27 # import sys # sys.path.append("WaveletCode") # import FlickerLikelihood # import numpy as np # from pymc import * # import Wavelets # # # Functions # # def model(t,a,b): # return a*t+b # # # Get the data # # # t,data = np.loadtxt('flicker_dataset.dat',unpack=True) # # # Priors # # # b = Uniform('b',-10,10) # a = Uniform('a',-1,1) # sigma_w = Uniform('sigma_w',0,100) # sigma_r = Uniform('sigma_r',0,100) # # # Likelihood # # @observed # def likelihood(value=data,t=t,a=a,b=b,sigma_w=sigma_w,sigma_r=sigma_r): # residuals=data-model(t,a,b) # return FlickerLikelihood.get_likelihood(residuals,sigma_w,sigma_r) # With this code, now I can run some MCMC links using PyMC; the following code uses the above code and draws samples from it: # # 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 11 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # 19 # 20 # 21 # import numpy as np # import matplotlib.pyplot as plt # import pymc # import LineFlickerModel # # # Set the number of iterations and burnins: # niterations = 1e5 # nburn = 1e4 # # # Set MAP estimates as starting point of MCMC. First, calculate MAP estimates: # M = pymc.MAP(LineFlickerModel) # M.fit() # # Now set this as starting point for the MCMC: # mc=pymc.MCMC(M.variables) # # # And start the sample! 
# mc.sample(iter=niterations+nburn,burn=nburn) # # # Plot the final samples (posterior samples): # pymc.Matplot.plot(mc) # plt.show() # The results you will get will be the posterior samples for each parameter. In particular, for the fitted parameters I get: # # a=0.501 \pm 0.008 # # b=3.04 \pm 2.8 # # (Here I cited the standard deviations of the posterior samples as errors). Note the huge errorbar on b: this is good! It is good because due to the correlated noise, we are very uncertain about the value (recall that the 1/f noise + white gaussian noise is a stochastic process and, as such, we only observe one realization of it; we can take into account this, but this wonโ€™t make our measurements more precise, only more accurate). What if we try fitting a white gaussian noise model (as most people would do!), not taking into account that we have correlated noise? I will leave that problem to the reader, but the MCMC samples of both, the gaussian white noise run (black points) and the 1/f + gaussian white noise run (red points), are plotted on the following figure: # # posterior_figure # # The results I get for the parameters are: # # a_{\textnormal{g}}=0.493 \pm 0.002 # # b_{\textnormal{g}}=4.56 \pm 0.63 # # Note the very little errorbar on the estimated parameter b_{\textnormal{g}}! This is BAD! What this shows (and you can generate more datasets and simulate this claim; which is actually something <NAME> (2009) already did) is that, in general, the errorbars on the parameters will be more realistic if you take into account the proper noise model: on average you will get the same results, but for a particular dataset, the errorbars taking into account the 1/f noise model will be more realistic than the ones you would get by assuming a gaussian (or any) white noise model. For the statisticians on the crowd: the estimator taking into account the 1/f noise has a lower variance than the estimator taking into account white gaussian noise. 
# # The challenge in real life is, of course, to know a-priori what kind of noise you actually have. There are several tools for this that are out of the scope of this blog post but, if you are really interested, you can check some of them on our work on this subject on Jordรกn, Espinoza, et. al (2013); the paper is a little technical in general, but the relevant section to this problem is Section 4.2. # # Conclusions # # I want to make the conclusions super short (because this was intended only as a tutorial on how to use my code), but in summary what I want to say is: # # Know your noise (or at least try to understand it). # This is the best advice I can give to anyone around the world: small errorbars donโ€™t imply high accuracy. # This is it! If you have any questions, remarks, thanks or find any bugs, please, the comment section is open! All the codes used in this example are on the GitHub repository of this package here: https://github.com/nespinoza/flicker-noise/tree/master/example. # # 1: To be fair, the real reasons of why I wanted to make a Python implementation were that: (1) I stopped coding in IDL as soon as I knew Python existed, (2) most of my colleagues work in python and (3) I really wanted to make use of PyMC for this kind of analyses. # # SHARE THIS: # TwitterFacebook # # This entry was posted in Statistics and tagged noise models by Nรฉstor. Bookmark the permalink. # Leave a Reply # # # Enter your comment here... # The Twenty Eleven Theme. | Blog at WordPress.com. # Follow # Follow โ€œrandomastronomyโ€ # # Get every new post delivered to your Inbox. # # # Enter your email address # # Sign me up # # Build a website with WordPress.com # :)
Learning/fitting_1_f_noise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "skip"}
# All the IPython Notebooks in this lecture series are available at https://github.com/rajathkumarmp/Python-Lectures

# + [markdown] slideshow={"slide_type": "slide"}
# # Control Flow Statements

# + [markdown] slideshow={"slide_type": "-"}
# ## If

# + [markdown] slideshow={"slide_type": "-"}
#     if some_condition:
#         algorithm

# + slideshow={"slide_type": "fragment"}
# A simple if: the body runs only when the condition is true.
x = 12
if x >10:
    print("Hello")

# + [markdown] slideshow={"slide_type": "subslide"}
# ## If-else

# + [markdown] slideshow={"slide_type": "-"}
#     if some_condition:
#         algorithm
#     else:
#         algorithm

# + slideshow={"slide_type": "fragment"}
x = 12
if x > 10:
    print("hello")
else:
    print("world")

# + [markdown] slideshow={"slide_type": "subslide"}
# ## if-elif

# + [markdown] slideshow={"slide_type": "-"}
#     if some_condition:
#         algorithm
#     elif some_condition:
#         algorithm
#     else:
#         algorithm

# + slideshow={"slide_type": "subslide"}
x = 10
y = 12
if x > y:
    print("x>y")
elif x < y:
    print ("x<y")
else:
    print ("x=y")

# + [markdown] slideshow={"slide_type": "subslide"}
# An if statement inside an if statement or if-elif or if-else are called nested if statements.

# + slideshow={"slide_type": "-"}
x = 10
y = 12
if x > y:
    print ("x>y")
elif x < y:
    print ("x<y")
    # Nested if: only reached when the outer elif branch is taken.
    if x==10:
        print ("x=10")
    else:
        print ("invalid")
else:
    print ("x=y")

# + [markdown] slideshow={"slide_type": "slide"}
# ## Loops

# + [markdown] slideshow={"slide_type": "-"}
# ### For

# + [markdown] slideshow={"slide_type": "-"}
#     for variable in something:
#         algorithm

# + slideshow={"slide_type": "subslide"}
for i in range(5):
    print(i)

# + [markdown] slideshow={"slide_type": "-"}
# In the above example, i iterates over the 0,1,2,3,4. Every time it takes each value and executes the algorithm inside the loop. It is also possible to iterate over a nested list, illustrated below.

# + slideshow={"slide_type": "subslide"}
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
for list1 in list_of_lists:
    print (list1)

# + [markdown] slideshow={"slide_type": "subslide"}
# A use case of a nested for loop in this case would be,

# + slideshow={"slide_type": "-"}
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
for list1 in list_of_lists:
    for x in list1:
        print (x)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Break

# + [markdown] slideshow={"slide_type": "-"}
# As the name says. It is used to break out of a loop when a condition becomes true when executing the loop.

# + slideshow={"slide_type": "-"}
for i in range(100):
    print (i)
    if i>=7:
        break

# + [markdown] slideshow={"slide_type": "slide"}
# ## Continue

# + [markdown] slideshow={"slide_type": "-"}
# This continues the rest of the loop. Sometimes when a condition is satisfied there are chances of the loop getting terminated. This can be avoided using the continue statement.

# + slideshow={"slide_type": "-"}
for i in range(10):
    if i>4:
        print ("The end.")
        # continue skips straight to the next iteration.
        continue
    elif i<7:
        print (i)
# -

# <div class="alert alert-success">
# <b>EXERCISE</b>: Create a list with ten values, then loop on it to print the values that are smaller than 3, and use break to interrupt the loop if a value equal to 10 appears
# </div>

# + [markdown] slideshow={"slide_type": "slide"}
# ## List Comprehensions

# + [markdown] slideshow={"slide_type": "-"}
# Python makes it simple to generate a required list with a single line of code using list comprehensions. For example, if I need to generate multiples of say 27 I write the code using a for loop as,

# + slideshow={"slide_type": "-"}
res = []
for i in range(1,11):
    x = 27*i
    res.append(x)
print (res)

# + [markdown] slideshow={"slide_type": "subslide"}
# Since you are generating another list altogether and that is what is required, List comprehensions is a more efficient way to solve this problem.

# + slideshow={"slide_type": "-"}
[27*x for x in range(1,11)]

# + [markdown] slideshow={"slide_type": "-"}
# That's it! Only remember to enclose it in square brackets

# + [markdown] slideshow={"slide_type": "subslide"}
# Understanding the code, The first bit of the code is always the algorithm and then leave a space and then write the necessary loop. But you might be wondering can nested loops be extended to list comprehensions? Yes you can.

# + slideshow={"slide_type": "-"}
[27*x for x in range(1,20) if x<=10]

# + [markdown] slideshow={"slide_type": "-"}
# Let me add one more loop to make you understand better,

# + slideshow={"slide_type": "-"}
[27*z for i in range(50) if i==27 for z in range(1,11)]
C - Control Flow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
import pandas as pd
import matplotlib.pyplot as plt

# %matplotlib inline

# +
# http://datalab.naver.com/ca/step1.naver
# Naver search-keyword trend lookup.  (The original Korean comment text was
# garbled by a UTF-8 -> cp874 mojibake round-trip; restored/translated here.)
# -

url = "http://datalab.naver.com/ca/step1/process.naver"

response = requests.post(
    url,
    data={
        "qcType": "N",
        # Two query groups: "Python" (keywords Python, 파이썬) and
        # "Matlab" (keywords Matlab, 매트랩).  The Korean keywords were
        # mojibake-damaged in this copy and are restored to proper UTF-8.
        # NOTE(review): the "__SZLIG__"/"__OUML__" tokens look like corrupted
        # group/keyword delimiter characters from the same encoding
        # round-trip -- TODO confirm against the datalab request format.
        "queryGroups": "Python__SZLIG__Python,파이썬__OUML__Matlab__SZLIG__Matlab,매트랩",
        "startDate": "20160201",
        "endDate": "20170228",
    }
)
data = response.json()

# One DataFrame per search group, indexed by period, with the value column
# renamed to the group's title so the plot legend is readable.
dfs = [
    pd.DataFrame(search_group.get("data"))\
        .set_index("period")\
        .rename(columns={"value": search_group.get("title")})
    for search_group in data.get("result")
]

# "네이버 검색량 추이" = "Naver search volume trend"
ax = pd.concat(dfs, axis=1).astype("float").plot()
ax.set_title("네이버 검색량 추이")
_examples/naver trend dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# <NAME> - 2020 - MIT License
# This script automatically tweets new data and graphs on the account @covidtracker_fr

# importing the modules
import france_data_management as data
import math
from datetime import datetime
import locale
import tweepy
import pandas as pd
# NOTE(review): `secrets` here is the project-local secrets.py holding the
# Twitter credentials described below; it shadows the stdlib `secrets` module.
import secrets as s
from datetime import timedelta

PATH = "../../"

locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')

"""
Secrets :
consumer_key ="xxxxxxxxxxxxxxxx"
consumer_secret ="xxxxxxxxxxxxxxxx"
access_token ="<KEY>"
access_token_secret ="<KEY>"
"""

# authentication
auth = tweepy.OAuthHandler(s.consumer_key, s.consumer_secret)
auth.set_access_token(s.access_token, s.access_token_secret)
api = tweepy.API(auth)


def tweet_france_maps():
    """Tweet the latest incidence map, then reply with the list of
    départements above the alert threshold (50 cases per 100 000
    inhabitants over 7 days).

    The French tweet text below had been corrupted by a bad encoding
    round-trip (UTF-8 read as cp874); the intended characters are restored,
    since these strings are posted verbatim to Twitter.
    """
    #_, _, dates, _, _, _, _, df_incid, _ = data.import_data()
    #df_incid = df_incid[df_incid["cl_age90"] == 0]
    df_incid_fra_clage = data.import_data_tests_viros()
    df_incid = df_incid_fra_clage[df_incid_fra_clage["cl_age90"]==0]
    lastday_df_incid = datetime.strptime(df_incid['jour'].max(), '%Y-%m-%d')

    ## TWEET2
    # 7-day rolling positive count per 100k inhabitants.
    # NOTE(review): df_incid is a slice of df_incid_fra_clage, so this
    # assignment may raise pandas' SettingWithCopyWarning -- consider .copy().
    df_incid["incidence"] = df_incid["P"].rolling(window=7).sum() / df_incid["pop"] * 100000
    df_incid_lastday = df_incid.loc[df_incid['jour']==df_incid['jour'].max(), :]
    filter_departement_alerte = df_incid_lastday[df_incid_lastday["incidence"] >= 50]
    nb_dep = len(filter_departement_alerte)
    departements_alerte = filter_departement_alerte.departmentName.values
    departements_alerte_valeurs = filter_departement_alerte.incidence.values

    images_path2 =[PATH+"images/charts/france/dep-map-incid-cat/latest.jpeg"]
    media_ids2 = []

    for filename in images_path2:
        res = api.media_upload(filename)
        media_ids2.append(res.media_id)

    tweet = "🔴 {} départements (métropole + DOM-TOM) devraient être classés rouge, car ils dépassent le niveau d'alerte de 50 cas pour 100 000 habitants en 7 jours (données du {})\n➡️ Plus d'infos : covidtracker.fr/covidtracker-france".format(nb_dep, lastday_df_incid.strftime('%d/%m'))

    tweet_departements = "Départements dépassant le seuil d'alerte : "
    for (idx, departement) in enumerate(departements_alerte):
        tweet_departements += departement + " (" + str(int(round(departements_alerte_valeurs[idx]))) + "), "
    # Drop the trailing ", ", then truncate with an ellipsis to stay under
    # Twitter's length limit.
    tweet_departements = tweet_departements[:len(tweet_departements)-2]

    if len(tweet_departements)>240:
        tweet_departements = tweet_departements[:236] + "…"

    first_tweet = api.update_status(status=tweet, media_ids=media_ids2)
    reply_tweet = api.update_status(status=tweet_departements, in_reply_to_status_id=first_tweet.id, auto_populate_reply_metadata=True)


tweet_france_maps()
src/france/tweetbot_france_maps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Builder Tutorial number 3 # # The builder tutorials demonstrate how to build an operational GSFLOW model using `pyGSFLOW` from shapefile, DEM, and other common data sources. These tutorials focus on the `gsflow.builder` classes. # # ## Calculating flow direction and flow accumulation arrays # # In this tutorial, we demonstrate how to calculate flow direction and flow accumulation arrays for use in building new GSFLOW models. Generating these datasets is necessary for later defining model stream networks and cascades. import os import matplotlib.pyplot as plt import matplotlib.colors as mcolors import numpy as np import flopy from gsflow.builder import GenerateFishnet # ### The `FlowAccumulation` class # # The `FlowAccumulation` class performs many operations including generating flow direction arrays and flow accumulation arrays. This example notebook focuses is on the `flow_direction` and `flow_accumulation` methods of this class. Other methods are presented in following tutorials. # # The `FlowAccumulation` class has 3 required parameters and 5 optional input parameters: # # **REQUIRED Parameters** # - `data` : resampled dem data array of dimension nrow, ncol (matches modelgrid dimension) # - `xcenters` : a two dimensional array of x coordinate cell centers (dimension nrow, ncol) # - `ycenters` : a two dimensional array of y coordinate cell centers (dimension nrow, ncol) # # **OPTIONAL Parameters** # - `acc_type` : flow accumlation type, currently only "d8" is supported # - `hru_type` : optional hru_type array where 0=inactive, 1=land, 2=lake, and 3=swale # - `closed_basin` : If true hru_type 2 is used in the flow direction calculations. False ignores hru_type 2. Default is False. 
# - `flow_dir_array` : previously calculated flow direction array. This parameter is used to restart the class without performing flow direction analysis # - `verbose` : boolean flag to print verbose output # # The resulting object contains a number of methods that will be covered over several notebooks. # # Let's start with importing the class from gsflow.builder import FlowAccumulation # ## Applying the methods to the Sagehen 50m example problem # # In this example the methods are applied directly to the Sagehen 50m model as they are presented. # + # define the input and output data paths input_ws = os.path.join("data", "sagehen", "50m_tutorials") output_ws = os.path.join("data", "temp") # define the modelgrid and resampled DEM data paths mg_file = os.path.join(input_ws, "sagehen_50m_grid.bin") dem_data = os.path.join(input_ws, "sagehen_50m_dem_median.txt") # - # ### Instantiating the `FlowAccumulation` class for the first time # # For the first time instantiating the class for the Sagehen 50m model, the modelgrid cell center coordinates, and dem data are supplied to the `FlowAccumulation` class. # # By only suppling these three required data sets, `FlowAccumulation` assumes that all cells are active land cells. The user can optionally supply a `hru_type` array if they would like to exclude certain areas from the flow direction and accumulation calculations. # + # load modelgrid modelgrid = GenerateFishnet.load_from_file(mg_file) dem_data = np.genfromtxt(dem_data) # instatiate the FlowAccumulation object fa = FlowAccumulation( dem_data, modelgrid.xcellcenters, modelgrid.ycellcenters, verbose=True ) # - # ### Calculating the D8 flow direction array # # The D8 flow direction calculation encodes a digital number based on maximum slope between a cell and it's nearest neighbors. The figure below shows the digital number that is encoded for each possible flow direction. 
# # <img src = "./figures/8.5.1.png"> # # Because D8 flow directions only code a single outlet direction connectivity of the network can be described as many cells can flow to one cell, and one cell can only flow to a single downgradient neighbor. # # The `flow_directions()` method performs these calculations and has two optional parameters: # - `dijkstra` : boolean flag to perform a modified version of dijkstra's algorithm to solve the maze problem in digitally flat areas. Default method is to use a topological method, which performs well in areas with adequate topography. # - `breach` : optional parameter that is the absolute value of breaching tolerance for digital dams. This parameters allows the flow direction routine to ignore small artifacts. Use caution when applying breaching values; they should be small numbers. # # In this example dijkstra's algorithm is used and a small breaching tolerance is applied to overcome a single digital dam in the dem data. # use a small breaching tolerance and dijkstra's algorithm in this example flow_directions = fa.flow_directions(dijkstra=True, breach=0.001) # Now let's inspect the flow direction array that we've generated # + # plot the flow direction array fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, aspect="equal") pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax) pc = pmv.plot_array( flow_directions, vmin=1, vmax=128, cmap="Dark2", norm=mcolors.PowerNorm(0.3) ) plt.title("Sagehen 50m flow direction array") plt.colorbar(pc, shrink=0.7, ticks=[1, 2, 4, 8, 16, 32, 64, 128]) plt.show(); # - # Alternatively we can plot this data as a flow vector (quiver) chart # + qx, qy = fa.get_vectors # plot the flow directions as a quiver map fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, aspect="equal") pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax) plt.quiver(modelgrid.xcellcenters, modelgrid.ycellcenters, qx, qy) plt.title("Sagehen 50m flow direction vectors") plt.show(); # - # 
### Calculating the flow accumulation array # # The flow accumulation array is calculated using Number of Input Drainage Paths (NIDP) methodology to deterine the contributing drainage area (number of cells) that drain to each model grid cell. # # Flow accumulation can be run after the flow direction array has been produced by calling the `flow_accumulation()` method. # run flow accumulation flow_accumulation = fa.flow_accumulation() # Now let's inspect the flow accumulation array by plotting it # + # plot the flow accumulation array fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, aspect="equal") pmv = flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax) pc = pmv.plot_array( flow_accumulation, vmin=0, vmax=12000 ) plt.title("Sagehen 50m flow accumulation array") plt.colorbar(pc, shrink=0.7) plt.show(); # - # ## Saving flow direction and flow accumulation arrays for later use # # The builder methods allow the user to save the flow direction and flow accumulation arrays and pick up where they left off in another session or script. # # These arrays can be saved using numpy's `savetxt()` method. # # *In the next tutorial, we'll reload both of these arrays and pick up from where we left off.* # + np.savetxt( os.path.join(output_ws, "sagehen_50m_flowdir.txt"), flow_directions.astype(int), delimiter=" ", fmt="%d") np.savetxt( os.path.join(output_ws, "sagehen_50m_flowacc.txt"), flow_accumulation, delimiter=" " ) # - # ## Saving the depressionless DEM # # The `FlowAccumulation` method automatically fills pits within the raster. Because of this, it is reccomended that the user save the depressionless DEM for later use after flow direction and flow accumulation have been performed. np.savetxt( os.path.join(output_ws, "sagehen_50m_dem.txt"), fa.get_dem_data(), delimiter=" " )
examples/pygsflow1.1.0_Builder_tutorial_03_flow_direction_and_accumulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: U4-S3-DNN (Python 3.7) # language: python # name: u4-s3-dnn # --- # <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> # <br></br> # <br></br> # # ## *Data Science Unit 4 Sprint 3 Assignment 1* # # # Recurrent Neural Networks and Long Short Term Memory (LSTM) # # ![Monkey at a typewriter](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Chimpanzee_seated_at_typewriter.jpg/603px-Chimpanzee_seated_at_typewriter.jpg) # # It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of Wiliam Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM. # # This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt # # Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach. # # Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size. # # Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more! 
from tensorflow.python.client import device_lib print(device_lib.list_local_devices()) # + # import from __future__ import print_function from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding from tensorflow.keras.layers import LSTM from tensorflow.keras.datasets import imdb import numpy as np import pandas as pd import requests # - # configure the features and batches max_features = 100000 # cut texts after this number of words (among top max_features most common words) maxlen = 40 step = 1 batch_size = 32 # load the data works = '' works_path = open('module1-rnn-and-lstm/works_of_Shakespeare.txt', 'r', encoding='utf8') works = works_path.read() print(len(works)) # + # encode data as characters and sequences # encode characters chars = sorted(list(set(works))) char_indicies = dict((c, i) for i, c in enumerate(chars)) indicies_char = dict((i, c) for i, c in enumerate(chars)) # encode sequences sentences = [] # x next_chars = [] # y for i in range(0, len(works) - maxlen, step): sentences.append(works[i: i + maxlen]) next_chars.append(works[i + maxlen]) print('sequences:', len(sentences)) # - sentences[10] # + # specify x and y x = [] y = [] x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sentences), len(chars)), dtype=np.bool) for i, sentence in enumerate(sentences): for t, char in enumerate(sentence): x[i, t, char_indicies[char]] = 1 y[i, char_indicies[next_chars[i]]] = 1 # - print(x[0].shape) print(y[0].shape) # + # build model model = Sequential() model.add(LSTM(128, input_shape=(maxlen, len(char)))) model.add(Dense(len(chars), activation='softmax')) optimizer = RMSprop(learning_rate=0.01) model.compile(loss='categorical_crossentropy', optimizer=optimizer) # + [markdown] colab_type="text" id="zE4a4O7Bp5x1" # # Resources and Stretch Goals # + [markdown] colab_type="text" id="uT3UV3gap9H6" # ## Stretch goals: # - Refine the training 
and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. plays versus sonnets) # - Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from # - Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.) # - Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier # - Run on bigger, better data # # ## Resources: # - [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN # - [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness" # - [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset # - [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation # - [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
module1-rnn-and-lstm/LS_DS_431_RNN_and_LSTM_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SIR Epidemiological Model import numpy as np from scipy.integrate import odeint import matplotlib.pyplot as plt import pandas as pd import math from hyperopt import fmin, tpe, STATUS_OK, STATUS_FAIL, Trials, hp import datetime # ## Basic SIRD population = pd.read_csv("data/us/demographics/county_populations.csv") nyt_data = pd.read_csv("data/us/covid/nyt_us_counties_daily.csv") population.head(1) nyt_data.head(1) fips = 6037 pop = int(population.loc[population["FIPS"] == fips]["total_pop"]) df = nyt_data.loc[nyt_data["fips"] == fips] df.reset_index(inplace=True) # Find first cases infection_start = df.loc[df["cases"] > 0].first_valid_index() start_date = df.iloc[infection_start]["date"] # Initial number of susceptible, infectious, recovered, deceased inf_init = df.iloc[infection_start]["cases"] rec_init = 0 dec_init = df.iloc[infection_start]["deaths"] sus_init = pop - inf_init - rec_init - dec_init beta = 0.19 # Infection rate gamma = 1./14 # Recovery rate mu = 1./60 # Mortality rate t = np.linspace(0, len(df), len(df)) def get_derivatives(y, t, N, beta, gamma, mu): S, I, R, D = y dSdt = - beta * I * S / N dIdt = beta * I * S / N - gamma * I - mu * I dRdt = gamma * I dDdt = mu * I return dSdt, dIdt, dRdt, dDdt y_init = sus_init, inf_init, rec_init, dec_init # Integrate over the time space res = odeint(get_derivatives, y_init, t, args=(pop, beta, gamma, mu)) S, I, R, D = res.T # Combine predictions and data to plot df["S"], df["I"], df["R"], df["D"], df["c_deaths"], df["c_cases"], df["C"] = 0, 0, 0, 0, 0, 0, 0 for i, row in df.iterrows(): df.loc[i, "S"] = S[i] df.loc[i, "I"] = I[i] df.loc[i, "R"] = R[i] df.loc[i, "D"] = D[i] try: df.at[i, "C"] = df.loc[i, "I"] + df.loc[i, "R"] df.at[i, "c_deaths"] = df.loc[i - 1, "c_deaths"] + df.loc[i, 
"deaths"] df.at[i, "c_cases"] = df.loc[i - 1, "c_cases"] + df.loc[i, "cases"] except Exception as e: df.at[i, "C"] = df.loc[i, "I"] + df.loc[i, "R"] df.at[i, "c_deaths"] = df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i, "cases"] # Plot the results vs. the actual data fig, ax = plt.subplots() ax.plot('date', 'c_deaths', data=df, label="Actual Cumulative Deaths") ax.plot('date', 'c_cases', data=df, label="Actual Cumulative Cases") #ax.plot('date', 'S', data=df, label="SIRD Susceptible") ax.plot('date', 'C', data=df, label="SIRD Infected + Recovered") ax.plot('date', 'D', data=df, label="SIRD Deceased") plt.title("SIRD Model for LA County") plt.legend() plt.show() # ## Finding Better Parameters with HyperOpt def SIRD_rmse(S, I, R, D, df): case_err, death_err, c_case_err, c_death_err = 0, 0, 0, 0 for i, row in df.iterrows(): if i == 0: this_cases = I[i] + R[i] this_deaths = D[i] else: this_cases = I[i] - I[i-1] + R[i] - R[i-1] this_deaths = D[i] - D[i-1] case_err += (row["cases"] - this_cases) ** 2 death_err += (row["deaths"] - this_deaths) ** 2 c_case_err += (row["c_cases"] - I[i]- R[i]) ** 2 c_death_err += (row["c_deaths"] - D[i]) ** 2 case_err /= len(S) death_err /= len(S) return math.sqrt(death_err)# + math.sqrt(c_death_err) / 2 class HyperOpt(object): def __init__(self, population, data, y_init, timespace): self.data = data.copy() self.data.reset_index(inplace=True) self.y_init = y_init self.pop = population self.t = timespace def eval_sird(self, params): result = odeint(get_derivatives, self.y_init, self.t, args=(self.pop, params["beta"], params["gamma"], params["mu"])) S, I, R, D = result.T rmse = SIRD_rmse(S, I, R, D, self.data) return rmse def optimize_params(self, space, trials, algo, max_evals): result = fmin(fn=self.eval_sird, space=space, algo=algo, max_evals=max_evals, trials=trials, verbose=False) return result, trials # + fips = 17031 pop = int(population.loc[population["FIPS"] == fips]["total_pop"]) df = nyt_data.loc[nyt_data["fips"] == fips] 
df.reset_index(inplace=True) df["c_deaths"], df["c_cases"] = 0, 0 for i, row in df.iterrows(): try: df.at[i, "c_deaths"] = df.loc[i - 1, "c_deaths"] + df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i - 1, "c_cases"] + df.loc[i, "cases"] except Exception as e: df.at[i, "c_deaths"] = df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i, "cases"] # Find first cases infection_start = df.loc[df["cases"] > 0].first_valid_index() start_date = df.iloc[infection_start]["date"] # Initial number of susceptible, infectious, recovered, deceased inf_init = df.iloc[infection_start]["cases"] rec_init = 0 dec_init = df.iloc[infection_start]["deaths"] sus_init = pop - inf_init - rec_init - dec_init y_init = sus_init, inf_init, rec_init, dec_init t = np.linspace(0, len(df)-14-infection_start, len(df)-14-infection_start) print (len(t)) # - param_space = { "beta": hp.uniform("beta", 0.1, 0.3), "gamma": hp.uniform("gamma", 0.05, 0.1), "mu": hp.uniform("mu", 0.02, 0.03) } hopt = HyperOpt(pop, df[infection_start:-14], y_init, t) optimized, trials = hopt.optimize_params(space=param_space, trials=Trials(), algo=tpe.suggest, max_evals=100) print("Best parameters:\n", optimized) # + # Plot the results max_deaths = nyt_data["deaths"][:-14].max() t = np.linspace(0, len(df), len(df)) res = odeint(get_derivatives, y_init, t, args=(pop, optimized["beta"], optimized["gamma"], optimized["mu"])) S, I, R, D = res.T df["S"], df["I"], df["R"], df["D"], df["D_diff"], df["c_deaths"], df["c_cases"], df["C"] = 0, 0, 0, 0, 0, 0, 0, 0 for i, row in df.iterrows(): if i < infection_start: df.loc[i, "S"] = 0 df.loc[i, "I"] = 0 df.loc[i, "R"] = 0 df.loc[i, "D"] = 0 df.loc[i, "D_diff"] = 0 else: df.loc[i, "S"] = S[i-infection_start] df.loc[i, "I"] = I[i-infection_start] df.loc[i, "R"] = R[i-infection_start] df.loc[i, "D"] = D[i-infection_start] if i == infection_start: df.loc[i, "D_diff"] = D[i-infection_start] else: df.loc[i, "D_diff"] = D[i-infection_start] - D[i-1-infection_start] try: df.at[i, "C"] = df.loc[i, 
"I"] + df.loc[i, "R"] df.at[i, "c_deaths"] = df.loc[i - 1, "c_deaths"] + df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i - 1, "c_cases"] + df.loc[i, "cases"] except Exception as e: df.at[i, "C"] = df.loc[i, "I"] + df.loc[i, "R"] df.at[i, "c_deaths"] = df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i, "cases"] df["D_diff"].clip(lower=0, upper=max_deaths, inplace=True) fig, ax = plt.subplots() ax.plot('date', 'deaths', data=df[:-14], label='Actual Deaths per Day (Training)') ax.plot('date', 'deaths', data=df[-14:], label='Actual Deaths per Day (Testing)') ax.plot('date', 'D_diff', data=df, label='SIRD Deceased w/ Differencing') #ax.plot('date', 'c_deaths', data=df, label="Actual Cumulative Deaths") #ax.plot('date', 'c_cases', data=df, label="Actual Cumulative Cases") #ax.plot('date', 'S', data=df, label="SIRD Susceptible") #ax.plot('date', 'C', data=df, label="SIRD Infected + Recovered") #ax.plot('date', 'D', data=df, label="SIRD Deceased") plt.title("SIRD Model vs. Actual Data") plt.legend() plt.show() # - # # Generating a Submission with SIRD fips_list = pd.read_csv("data/us/processing_data/fips_key.csv", encoding="cp1252") test_per = 14 pred_per = 0 ids, i10, i20, i30, i40, i50, i60, i70, i80, i90 = [], [], [], [], [], [], [], [], [], [] z_80 = 1.28 z_60 = 0.84 z_40 = 0.525 z_20 = 0.25 # + print(str(datetime.datetime.now())+"\n") max_deaths = 750 for idx, row in fips_list.iterrows(): county = int(row["FIPS"]) print("County " + str(county) + "...", end='\r', flush=True) df = nyt_data.loc[nyt_data["fips"] == county][:-test_per] df.reset_index(inplace=True) df["c_deaths"], df["c_cases"] = 0, 0 for i, row in df.iterrows(): try: df.at[i, "c_deaths"] = df.loc[i - 1, "c_deaths"] + df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i - 1, "c_cases"] + df.loc[i, "cases"] except Exception as e: df.at[i, "c_deaths"] = df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i, "cases"] try: cum_deaths = df.iloc[-1]["c_deaths"] except IndexError as e: if len(df) == 0: #print("No 
data found for county", str(county), ":", row["COUNTY"]) continue else: cum_deaths = 0 if cum_deaths >= 15: try: pop = int(population.loc[population["FIPS"] == county]["total_pop"]) except TypeError as e: print("No population found for county", str(county), ":", row["COUNTY"]) print("This county has at least 15 cumulative deaths!") raise e # Find first cases infection_start = df.loc[df["cases"] > 0].first_valid_index() start_date = df.iloc[infection_start]["date"] # Initial number of susceptible, infectious, recovered, deceased inf_init = df.iloc[infection_start]["cases"] rec_init = 0 dec_init = df.iloc[infection_start]["deaths"] sus_init = pop - inf_init - rec_init - dec_init y_init = sus_init, inf_init, rec_init, dec_init t = np.linspace(0, len(df)-infection_start, len(df)-infection_start) param_space = { "beta": hp.uniform("beta", 0, 1), "gamma": hp.uniform("gamma", 0, 1), "mu": hp.uniform("mu", 0, 1) } hopt = HyperOpt(pop, df[infection_start:], y_init, t) optimized, trials = hopt.optimize_params(space=param_space, trials=Trials(), algo=tpe.suggest, max_evals=100) t = np.linspace(0, len(df) + test_per + pred_per, len(df) + test_per + pred_per) res = odeint(get_derivatives, y_init, t, args=(pop, optimized["beta"], optimized["gamma"], optimized["mu"])) S, I, R, D = res.T date = datetime.date.fromisoformat(df.iloc[0]["date"]) + datetime.timedelta(days=int(infection_start)) for i, ddata in enumerate(D): this_id = date.isoformat() + "-" + str(county) date += datetime.timedelta(days=1) if i == 0: mid = ddata else: mid = ddata - D[i - 1] if mid > max_deaths: mid = max_deaths sd = 3 * math.sqrt(mid) ids.append(this_id) i10.append(mid - sd * z_80) i20.append(mid - sd * z_60) i30.append(mid - sd * z_40) i40.append(mid - sd * z_20) i50.append(mid) i60.append(mid + sd * z_20) i70.append(mid + sd * z_40) i80.append(mid + sd * z_60) i90.append(mid + sd * z_80) else: date = datetime.date.fromisoformat(df.iloc[0]["date"]) for i in range(len(df) + test_per + pred_per): this_id = 
date.isoformat() + "-" + str(county) date += datetime.timedelta(days=1) ids.append(this_id) i10.append(0) i20.append(0) i30.append(0) i40.append(0) i50.append(0) i60.append(0) i70.append(0) i80.append(0) i90.append(0) print("\n" + str(datetime.datetime.now())) # - predictions = pd.DataFrame(data={"id":ids, "10":i10, "20":i20, "30":i30, "40":i40, "50":i50, "60":i60, "70":i70, "80":i80, "90":i90}) predictions["10"] = predictions["10"].apply(lambda x: x if x >= 1 else 0) predictions["20"] = predictions["20"].apply(lambda x: x if x >= 1 else 0) predictions["30"] = predictions["30"].apply(lambda x: x if x >= 1 else 0) predictions["40"] = predictions["40"].apply(lambda x: x if x >= 1 else 0) predictions["50"] = predictions["50"].apply(lambda x: x if x >= 1 else 0) predictions["60"] = predictions["60"].apply(lambda x: x if x >= 1 else 0) predictions["70"] = predictions["70"].apply(lambda x: x if x >= 1 else 0) predictions["80"] = predictions["80"].apply(lambda x: x if x >= 1 else 0) predictions["90"] = predictions["90"].apply(lambda x: x if x >= 1 else 0) predictions.to_csv("test_submissions/sird.csv", index=False) # Best loss: 0.7628444761093374 # ## Which Counties are a Good Fit for SIRD? 
total_deaths = fips_list.copy() total_deaths.drop(["MSA/PMSA NECMA", "ST", "COUNTY"], axis=1, inplace=True) total_deaths["deaths"] = 0 total_deaths.head() # + for i, row in total_deaths.iterrows(): county = int(row["FIPS"]) c_row = nyt_data.loc[nyt_data["fips"] == county] cum_deaths = 0 for a, item in c_row.iterrows(): cum_deaths += int(item["deaths"]) total_deaths.at[i, "deaths"] = cum_deaths total_deaths.head() # - total_deaths.dropna(inplace=True) total_deaths.sort_values("deaths", inplace=True, ascending=False) total_deaths.head() # + test_per = 14 total_deaths.reset_index(inplace=True) for i, row in total_deaths.iterrows(): if i > 20: break fips = row["FIPS"] pop = int(population.loc[population["FIPS"] == fips]["total_pop"]) df = nyt_data.loc[nyt_data["fips"] == fips] df.reset_index(inplace=True) df["c_deaths"], df["c_cases"] = 0, 0 for i, row in df.iterrows(): try: df.at[i, "c_deaths"] = df.loc[i - 1, "c_deaths"] + df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i - 1, "c_cases"] + df.loc[i, "cases"] except Exception as e: df.at[i, "c_deaths"] = df.loc[i, "deaths"] df.at[i, "c_cases"] = df.loc[i, "cases"] # Find first cases infection_start = df.loc[df["cases"] > 0].first_valid_index() start_date = df.iloc[infection_start]["date"] # Initial number of susceptible, infectious, recovered, deceased inf_init = df.iloc[infection_start]["cases"] rec_init = 0 dec_init = df.iloc[infection_start]["deaths"] sus_init = pop - inf_init - rec_init - dec_init y_init = sus_init, inf_init, rec_init, dec_init t = np.linspace(0, len(df)-test_per-infection_start, len(df)-test_per-infection_start) param_space = { "beta": hp.uniform("beta", 0, 1), "gamma": hp.uniform("gamma", 0, 1), "mu": hp.uniform("mu", 0, 1) } hopt = HyperOpt(pop, df[infection_start:-test_per], y_init, t) optimized, trials = hopt.optimize_params(space=param_space, trials=Trials(), algo=tpe.suggest, max_evals=100) t = np.linspace(0, len(df), len(df)) res = odeint(get_derivatives, y_init, t, args=(pop, 
optimized["beta"], optimized["gamma"], optimized["mu"])) S, I, R, D = res.T rmse = SIRD_rmse(S, I, R, D, df) print("Error for county", fips, "was", str(rmse)) # - # Counties with error under 100: # - 17031 # - 36119 # - 34013 # - 34003 # - 6037 # - 9001 # - 25017 # - 34017 # - 26125 # - 9003 # - 34039 # - 26099 # - 42101 # - 34023 # - 9009 # - 34031 # - 25025 # # Counties with error under 25: # - 6037 # - 9001 # - 25017 # - 9003 # - 42101 # - 34023 # - 9009 # - 34031 # - 25025
Epidemiological Models/SIRD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Part I: On-policy learning and SARSA (3 points)](#Part-I:-On-policy-learning-and-SARSA-(3-points)) # # [Part II: Experience replay (4 points)](#Part-II:-experience-replay-(4-points)) # # [Bonus I: TD($ \lambda $) (5+ points)](#Bonus-I:-TD($\lambda$)-(5+-points)) # # [Bonus II: More pacman (5+ points)](#Bonus-II:-More-pacman-(5+-points)) # ## Part I: On-policy learning and SARSA (3 points) # # _This notebook builds upon `qlearning.ipynb`, or to be exact, generating qlearning.py._ # # The policy we're gonna use is epsilon-greedy policy, where agent takes optimal action with probability $(1-\epsilon)$, otherwise samples action at random. Note that agent __can__ occasionally sample optimal action during random sampling by pure chance. # + # In google collab, uncomment this: # # !wget https://bit.ly/2FMJP5K -q -O setup.py # # !bash setup.py 2>&1 1>stdout.log | tee stderr.log # This code creates a virtual display to draw game images on. # If you are running locally, just ignore it import os if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: # !bash ../xvfb start # %env DISPLAY = : 1 import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # %load_ext autoreload # %autoreload 2 # - # Now you can use code, generated from seminar `seminar_qlearning.ipynb`. Or just copy&paste it. 
# + # %%writefile qlearning.py from collections import defaultdict import random import math import numpy as np class QLearningAgent: def __init__(self, alpha, epsilon, discount, get_legal_actions): """ Q-Learning Agent based on https://inst.eecs.berkeley.edu/~cs188/sp19/projects.html Instance variables you have access to - self.epsilon (exploration prob) - self.alpha (learning rate) - self.discount (discount rate aka gamma) Functions you should use - self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable} which returns legal actions for a state - self.get_qvalue(state,action) which returns Q(state,action) - self.set_qvalue(state,action,value) which sets Q(state,action) := value !!!Important!!! Note: please avoid using self._qValues directly. There's a special self.get_qvalue/set_qvalue for that. """ self.get_legal_actions = get_legal_actions self._qvalues = defaultdict(lambda: defaultdict(lambda: 0)) self.alpha = alpha self.epsilon = epsilon self.discount = discount def get_qvalue(self, state, action): """ Returns Q(state,action) """ return self._qvalues[state][action] def set_qvalue(self, state, action, value): """ Sets the Qvalue for [state,action] to the given value """ self._qvalues[state][action] = value #---------------------START OF YOUR CODE---------------------# def get_value(self, state): """ Compute your agent's estimate of V(s) using current q-values V(s) = max_over_action Q(state,action) over possible actions. Note: please take into account that q-values can be negative. 
""" possible_actions = self.get_legal_actions(state) # If there are no legal actions, return 0.0 if len(possible_actions) == 0: return 0.0 <YOUR CODE HERE > return value def update(self, state, action, reward, next_state): """ You should do your Q-Value update here: Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s')) """ # agent parameters gamma = self.discount learning_rate = self.alpha <YOUR CODE HERE > self.set_qvalue(state, action, < YOUR_QVALUE > ) def get_best_action(self, state): """ Compute the best action to take in a state (using current q-values). """ possible_actions = self.get_legal_actions(state) # If there are no legal actions, return None if len(possible_actions) == 0: return None <YOUR CODE HERE > return best_action def get_action(self, state): """ Compute the action to take in the current state, including exploration. With probability self.epsilon, we should take a random action. otherwise - the best policy action (self.getPolicy). Note: To pick randomly from a list, use random.choice(list). To pick True or False with a given probablity, generate uniform number in [0, 1] and compare it with your probability """ # Pick Action possible_actions = self.get_legal_actions(state) action = None # If there are no legal actions, return None if len(possible_actions) == 0: return None # agent parameters: epsilon = self.epsilon <YOUR CODE HERE > return chosen_action # + from qlearning import QLearningAgent class EVSarsaAgent(QLearningAgent): """ An agent that changes some of q-learning functions to implement Expected Value SARSA. Note: this demo assumes that your implementation of QLearningAgent.update uses get_value(next_state). 
If it doesn't, please add def update(self, state, action, reward, next_state): and implement it for Expected Value SARSA's V(s') """ def get_value(self, state): """ Returns Vpi for current state under epsilon-greedy policy: V_{pi}(s) = sum _{over a_i} {pi(a_i | s) * Q(s, a_i)} Hint: all other methods from QLearningAgent are still accessible. """ epsilon = self.epsilon possible_actions = self.get_legal_actions(state) # If there are no legal actions, return 0.0 if len(possible_actions) == 0: return 0.0 <YOUR CODE HERE: SEE DOCSTRING > return state_value # - # ### Cliff World # # Let's now see how our algorithm compares against q-learning in case where we force agent to explore all the time. # # <img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/cliffworld.png width=600> # <center><i>image by cs188</i></center> # + import gym import gym.envs.toy_text env = gym.envs.toy_text.CliffWalkingEnv() n_actions = env.action_space.n print(env.__doc__) # - # Our cliffworld has one difference from what's on the image: there is no wall. # Agent can choose to go as close to the cliff as it wishes. x:start, T:exit, C:cliff, o: flat ground env.render() def play_and_train(env, agent, t_max=10**4): """This function should - run a full game, actions given by agent.getAction(s) - train agent using agent.update(...) 
whenever possible - return total reward""" total_reward = 0.0 s = env.reset() for t in range(t_max): a = agent.get_action(s) next_s, r, done, _ = env.step(a) agent.update(s, a, r, next_s) s = next_s total_reward += r if done: break return total_reward # + from qlearning import QLearningAgent agent_sarsa = EVSarsaAgent(alpha=0.25, epsilon=0.2, discount=0.99, get_legal_actions=lambda s: range(n_actions)) agent_ql = QLearningAgent(alpha=0.25, epsilon=0.2, discount=0.99, get_legal_actions=lambda s: range(n_actions)) # + from IPython.display import clear_output from pandas import DataFrame def moving_average(x, span=100): return DataFrame( {'x': np.asarray(x)}).x.ewm(span=span).mean().values rewards_sarsa, rewards_ql = [], [] for i in range(5000): rewards_sarsa.append(play_and_train(env, agent_sarsa)) rewards_ql.append(play_and_train(env, agent_ql)) # Note: agent.epsilon stays constant if i % 100 == 0: clear_output(True) print('EVSARSA mean reward =', np.mean(rewards_sarsa[-100:])) print('QLEARNING mean reward =', np.mean(rewards_ql[-100:])) plt.title("epsilon = %s" % agent_ql.epsilon) plt.plot(moving_average(rewards_sarsa), label='ev_sarsa') plt.plot(moving_average(rewards_ql), label='qlearning') plt.grid() plt.legend() plt.ylim(-500, 0) plt.show() # - # Let's now see what did the algorithms learn by visualizing their actions at every state. def draw_policy(env, agent): """ Prints CliffWalkingEnv policy with arrows. Hard-coded. 
""" n_rows, n_cols = env._cliff.shape actions = '^>v<' for yi in range(n_rows): for xi in range(n_cols): if env._cliff[yi, xi]: print(" C ", end='') elif (yi * n_cols + xi) == env.start_state_index: print(" X ", end='') elif (yi * n_cols + xi) == n_rows * n_cols - 1: print(" T ", end='') else: print(" %s " % actions[agent.get_best_action(yi * n_cols + xi)], end='') print() # + print("Q-Learning") draw_policy(env, agent_ql) print("SARSA") draw_policy(env, agent_sarsa) # - # ### More on SARSA # # Here are some of the things you can do if you feel like it: # # * Play with epsilon. See learned how policies change if you set epsilon to higher/lower values (e.g. 0.75). # * Expected Value SASRSA for softmax policy __(2pts)__: # $$ \pi(a_i|s) = softmax({Q(s,a_i) \over \tau}) = {e ^ {Q(s,a_i)/ \tau} \over {\sum_{a_j} e ^{Q(s,a_j) / \tau }}} $$ # * Implement N-step algorithms and TD($\lambda$): see [Sutton's book](http://incompleteideas.net/book/bookdraft2018jan1.pdf) chapter 7 and chapter 12. # * Use those algorithms to train on CartPole in previous / next assignment for this week. # ## Part II: experience replay (4 points) # # There's a powerful technique that you can use to improve sample efficiency for off-policy algorithms: [spoiler] Experience replay :) # # The catch is that you can train Q-learning and EV-SARSA on `<s,a,r,s'>` tuples even if they aren't sampled under current agent's policy. So here's what we're gonna do: # # <img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/exp_replay.png width=480> # # #### Training with experience replay # 1. Play game, sample `<s,a,r,s'>`. # 2. Update q-values based on `<s,a,r,s'>`. # 3. Store `<s,a,r,s'>` transition in a buffer. # 3. If buffer is full, delete earliest data. # 4. Sample K such transitions from that buffer and update q-values based on them. # # # To enable such training, first we must implement a memory structure that would act like such a buffer. 
# +
# In google collab, uncomment this:

# # !wget https://bit.ly/2FMJP5K -q -O setup.py
# # !bash setup.py 2>&1 1>stdout.log | tee stderr.log

# This code creates a virtual display to draw game images on.
# If you are running locally, just ignore it

import os

if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
    # On a headless machine the two notebook magics below start a virtual
    # framebuffer; in .py form they are comments, so `pass` keeps the
    # `if` body non-empty (the original had an empty body -> SyntaxError).
    # !bash ../xvfb start
    # %env DISPLAY = : 1
    pass

# %load_ext autoreload
# %autoreload 2

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

from IPython.display import clear_output

# +
import random


class ReplayBuffer(object):
    """Fixed-size FIFO buffer of <s, a, r, s', done> transitions.

    Transitions are kept in a plain list; once the buffer holds `size`
    entries, the oldest transition is evicted for every new one added.
    """

    def __init__(self, size):
        """
        Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        self._storage = []
        self._maxsize = size

    def __len__(self):
        # Number of transitions currently stored (<= _maxsize).
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Store one transition, evicting the oldest entry when full (FIFO)."""
        data = (obs_t, action, reward, obs_tp1, done)
        self._storage.append(data)
        if len(self._storage) > self._maxsize:
            # FIFO rule: the oldest example is removed first.
            self._storage.pop(0)

    def sample(self, batch_size):
        """Sample a batch of experiences (with replacement).

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in the end of
            an episode and 0 otherwise.
        """
        # Indexes are drawn with replacement, so batch_size may exceed len(self).
        idxes = random.choices(range(len(self._storage)), k=batch_size)

        # Unzip the selected <s, a, r, s', done> tuples into parallel columns.
        states, actions, rewards, next_states, is_done = zip(
            *[self._storage[i] for i in idxes])

        return (np.array(states),
                np.array(actions),
                np.array(rewards),
                np.array(next_states),
                np.array(is_done))
# -

# Some tests to make sure your buffer works right
replay = ReplayBuffer(2)
obj1 = tuple(range(5))
obj2 = tuple(range(5, 10))

replay.add(*obj1)
assert replay.sample(
    1) == obj1, "If there's just one object in buffer, it must be retrieved by buf.sample(1)"

replay.add(*obj2)
assert len(replay) == 2, "Please make sure __len__ methods works as intended."

replay.add(*obj2)
assert len(replay) == 2, "When buffer is at max capacity, replace objects instead of adding new ones."

assert tuple(np.unique(a) for a in replay.sample(100)) == obj2

replay.add(*obj1)
assert max(len(np.unique(a)) for a in replay.sample(100)) == 2

replay.add(*obj1)
assert tuple(np.unique(a) for a in replay.sample(100)) == obj1

print("Success!")

# Now let's use this buffer to improve training:

# +
import gym
from qlearning import QLearningAgent

env = gym.make("Taxi-v2")
n_actions = env.action_space.n
# -


def play_and_train_with_replay(env, agent, replay=None,
                               t_max=10**4, replay_batch_size=32):
    """
    This function should
    - run a full game, actions given by agent.get_action(s)
    - train agent using agent.update(...) whenever possible
    - return total reward
    :param replay: ReplayBuffer where agent can store and sample
        (s,a,r,s',done) tuples. If None, do not use experience replay.
    """
    total_reward = 0.0
    s = env.reset()

    for t in range(t_max):
        # get agent to pick action given state s
        a = agent.get_action(s)

        next_s, r, done, _ = env.step(a)

        # update agent on current transition
        agent.update(s, a, r, next_s)

        if replay is not None:
            # store current <s,a,r,s'> transition in buffer
            replay.add(s, a, r, next_s, done)

            # sample replay_batch_size random transitions from replay,
            # then update agent on each of them in a loop
            s_batch, a_batch, r_batch, next_s_batch, _ = replay.sample(
                replay_batch_size)
            for i in range(replay_batch_size):
                agent.update(s_batch[i], a_batch[i], r_batch[i],
                             next_s_batch[i])

        s = next_s
        total_reward += r
        if done:
            break

    return total_reward


# +
# Create two agents: first will use experience replay, second will not.

agent_baseline = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
                                get_legal_actions=lambda s: range(n_actions))

agent_replay = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
                              get_legal_actions=lambda s: range(n_actions))

replay = ReplayBuffer(1000)

# +
from IPython.display import clear_output

rewards_replay, rewards_baseline = [], []

for i in range(1000):
    rewards_replay.append(
        play_and_train_with_replay(env, agent_replay, replay))
    rewards_baseline.append(play_and_train_with_replay(
        env, agent_baseline, replay=None))

    agent_replay.epsilon *= 0.99
    agent_baseline.epsilon *= 0.99

    if i % 100 == 0:
        clear_output(True)
        # NOTE: the original template printed agent_replay's epsilon on the
        # "Baseline" line and agent_baseline's on "ExpReplay"; each agent is
        # now reported with its own epsilon.
        print('Baseline : eps =', agent_baseline.epsilon,
              'mean reward =', np.mean(rewards_baseline[-10:]))
        print('ExpReplay: eps =', agent_replay.epsilon,
              'mean reward =', np.mean(rewards_replay[-10:]))
        # moving_average is defined earlier in this notebook.
        plt.plot(moving_average(rewards_replay), label='exp. replay')
        plt.plot(moving_average(rewards_baseline), label='baseline')
        plt.grid()
        plt.legend()
        plt.show()
# -

# #### What to expect:
#
# Experience replay, if implemented correctly, will improve algorithm's initial convergence a lot, but it shouldn't affect the final performance.
#
# ### Outro
#
# We will use the code you just wrote extensively in the next week of our course. If you're feeling that you need more examples to understand how experience replay works, try using it for binarized state spaces (CartPole or other __[classic control envs](https://gym.openai.com/envs/#classic_control)__).
# # __Next week__ we're gonna explore how q-learning and similar algorithms can be applied for large state spaces, with deep learning models to approximate the Q function. # # However, __the code you've written__ for this week is already capable of solving many RL problems, and as an added benifit - it is very easy to detach. You can use Q-learning, SARSA and Experience Replay for any RL problems you want to solve - just thow 'em into a file and import the stuff you need. # ### Bonus I: TD($\lambda$) (5+ points) # # There's a number of advanced algorithms you can find in week 3 materials (Silver lecture II and/or reading about eligibility traces). One such algorithm is TD(lambda), which is based on the idea of eligibility traces. You can also view it as a combination of N-step updates for alll N. # * N-step temporal difference from Sutton's book - [url](http://incompleteideas.net/book/the-book-2nd.html), page 142 / chapter 7 # * Eligibility traces from Sutton's book - same url, chapter 12 / page 278 # * Blog post on eligibility traces - [url](http://pierrelucbacon.com/traces/) # # Here's a practical algorithm you can start with: [url](https://stackoverflow.com/questions/40862578/how-to-understand-watkinss-q%CE%BB-learning-algorithm-in-suttonbartos-rl-book/40892302) # # # Implementing this algorithm will prove more challenging than q-learning or sarsa, but doing so will earn you a deeper understanding of how value-based methods work [in addition to some bonus points]. # # More kudos for comparing and analyzing TD($\lambda$) against Q-learning and EV-SARSA in different setups (taxi vs cartpole, constant epsilon vs decreasing epsilon). # ### Bonus II: More pacman (5+ points) # # __see README.md for software requirements of seminar_py2__ # # Remember seminar_py2 where your vanilla q-learning had hard time solving Pacman even on a small grid? Now's the time to fix that issue. # # We'll focus on those grids for pacman setup. 
# * python pacman.py -p PacmanQAgent -x N_TRAIN_GAMES -n N_TOTAL_GAMES -l __mediumGrid__ # * python pacman.py -p PacmanQAgent -x N_TRAIN_GAMES -n N_TOTAL_GAMES -l __mediumClassic__ # # Even if you adjust N_TRAIN_GAMES to 10^5 and N_TOTAL_GAMES to 10^5+100 (100 last games are for test), pacman won't solve those environments # # The problem with those environments is that they have a large amount of unique states. However, you can devise a smaller environment state by choosing different observation parameters, e.g.: # * distance and direction to nearest ghost # * where is nearest food # * 'center of mass' of all food points (and variance, and whatever) # * is there a wall in each direction # * and anything else you see fit # # Here's how to get this information from [state](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L49), # * Get pacman position: [state.getPacmanPosition()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L128) # * Is there a wall at (x,y)?: [state.hasWall(x,y)](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L189) # * Get ghost positions: [state.getGhostPositions()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L144) # * Get all food positions: [state.getCapsules()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L153) # # You can call those methods anywhere you see state. # * e.g. 
in [agent.getValue(state)](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L52) # * Defining a function that extracts all features and calling it in [getQValue](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L38) and [setQValue](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L44) is probably enough. # * You can also change agent parameters. The simplest way is to hard-code them in [PacmanQAgent](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L140) # # Also, don't forget to optimize ```learning_rate```, ```discount``` and ```epsilon``` params of model, this may also help to solve this env.
week03_model_free/homework.ipynb
# + # Copyright 2010 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pandigital numbers in Google CP Solver. From <NAME> 'Recreations in the Theory of Numbers', quoted from http://www.worldofnumbers.com/ninedig1.htm ''' Chapter VIII : Digits - and the magic of 9 The following curious table shows how to arrange the 9 digits so that the product of 2 groups is equal to a number represented by the remaining digits. 12 x 483 = 5796 42 x 138 = 5796 18 x 297 = 5346 27 x 198 = 5346 39 x 186 = 7254 48 x 159 = 7632 28 x 157 = 4396 4 x 1738 = 6952 4 x 1963 = 7852 ''' See also MathWorld http://mathworld.wolfram.com/PandigitalNumber.html ''' A number is said to be pandigital if it contains each of the digits from 0 to 9 (and whose leading digit must be nonzero). However, 'zeroless' pandigital quantities contain the digits 1 through 9. Sometimes exclusivity is also required so that each digit is restricted to appear exactly once. 
''' * Wikipedia http://en.wikipedia.org/wiki/Pandigital_number Compare with the the following models: * MiniZinc: http://www.hakank.org/minizinc/pandigital_numbers.mzn * Comet : http://www.hakank.org/comet/pandigital_numbers.co * ECLiPSe : http://www.hakank.org/eclipse/pandigital_numbers.ecl * Gecode/R: http://www.hakank.org/gecoder/pandigital_numbers.rb * ECLiPSe : http://hakank.org/eclipse/pandigital_numbers.ecl * SICStus : http://hakank.org/sicstus/pandigital_numbers.pl This model was created by <NAME> (<EMAIL>) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ """ from __future__ import print_function import sys from ortools.constraint_solver import pywrapcp # # converts a number (s) <-> an array of integers (t) in the specific base. # def toNum(solver, t, s, base): tlen = len(t) solver.Add( s == solver.Sum([(base**(tlen - i - 1)) * t[i] for i in range(tlen)])) # Create the solver. solver = pywrapcp.Solver("Pandigital numbers") # # data # max_d = base - 1 x_len = max_d + 1 - start max_num = base**4 - 1 # # declare variables # num1 = solver.IntVar(0, max_num, "num1") num2 = solver.IntVar(0, max_num, "num2") res = solver.IntVar(0, max_num, "res") x = [solver.IntVar(start, max_d, "x[%i]" % i) for i in range(x_len)] # # constraints # solver.Add(solver.AllDifferent(x)) toNum(solver, [x[i] for i in range(len1)], num1, base) toNum(solver, [x[i] for i in range(len1, len1 + len2)], num2, base) toNum(solver, [x[i] for i in range(len1 + len2, x_len)], res, base) solver.Add(num1 * num2 == res) # no number must start with 0 solver.Add(x[0] > 0) solver.Add(x[len1] > 0) solver.Add(x[len1 + len2] > 0) # symmetry breaking solver.Add(num1 < num2) # # solution and search # solution = solver.Assignment() solution.Add(x) solution.Add(num1) solution.Add(num2) solution.Add(res) db = solver.Phase(x, solver.INT_VAR_SIMPLE, solver.INT_VALUE_DEFAULT) solver.NewSearch(db) num_solutions = 0 solutions = [] while solver.NextSolution(): 
print_solution([x[i].Value() for i in range(x_len)], len1, len2, x_len) num_solutions += 1 solver.EndSearch() if 0 and num_solutions > 0: print() print("num_solutions:", num_solutions) print("failures:", solver.Failures()) print("branches:", solver.Branches()) print("WallTime:", solver.WallTime()) print() def print_solution(x, len1, len2, x_len): print("".join([str(x[i]) for i in range(len1)]), "*", end=" ") print("".join([str(x[i]) for i in range(len1, len1 + len2)]), "=", end=" ") print("".join([str(x[i]) for i in range(len1 + len2, x_len)])) base = 10 start = 1
examples/notebook/contrib/pandigital_numbers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Running all web scraping scripts

# Importing modules:

# +
import requests
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import re
import os
from datetime import datetime
# -

# The file placement_univ_acronym contains info like: full name of each university; the location; ranks from usnews; links of econ placement; some other notes; acronyms of each university.

links = pd.read_excel("placement_univ_acronym.xlsx")
links.head()


# Functions to use in web scraping:

# +
def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            if is_good_response(resp):
                return resp.content
            else:
                return None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None


def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.
    """
    content_type = resp.headers['Content-Type'].lower()
    return (resp.status_code == 200
            and content_type is not None
            and content_type.find('html') > -1)


def log_error(e):
    """
    It is always a good idea to log errors.
    This function just prints them, but you can
    make it do anything.
    """
    print(e)


def find_pattern(s, srt, end):
    """Return all substrings of `s` found between `srt` and `end`
    (regex lookbehind/lookahead, multi-line)."""
    pattern = r'(?<=' + srt + ').*(?=' + end + ')'
    name_pattern = re.compile(pattern, flags=re.M)
    names = name_pattern.findall(s)
    return(names)


def assign_year(s, names, year):
    """For each entry of `names`, find which `year` section of `s` it falls
    into (by character offsets) and return one year label per entry."""
    name_idx = [s.find(i) for i in names]
    year_idx = [s.find(i) for i in year]
    # Sentinel so the last year section extends to the end of the page.
    year_idx.append(len(s))
    name_year = []
    for i in name_idx:
        # `and` short-circuits, so year_idx[j + 1] is never evaluated for
        # the sentinel index.
        name_year.extend([year[j] for j in range(len(year_idx))
                          if i >= year_idx[j] and i < year_idx[j + 1]])
    return(name_year)


def fast_parse_1(html, name_parser, placement_parser, year_parser):
    """Scrape one placement page and return a DataFrame with columns
    Year/Name/Placement.

    If name_parser is [None, None], names are unavailable on the page and
    years are assigned to placements directly. On a name/placement length
    mismatch the raw lists are returned for manual inspection.
    """
    # read the content of the placement website
    raw_html = simple_get(html)
    html = BeautifulSoup(raw_html, 'html.parser')

    # store the year and placement info
    names = []
    year = []
    placement = []

    s = str(html)

    srt, end = placement_parser
    placement = find_pattern(s, srt, end)
    # print('Length of placement list: {}'.format(len(placement)))

    if name_parser != [None, None]:
        srt, end = name_parser
        names = find_pattern(s, srt, end)
        # print('Length of name list: {}'.format(len(names)))
    else:
        # No names on this page: attach years to the placements themselves.
        names = [None] * len(placement)
        srt, end = year_parser
        year = find_pattern(s, srt, end)
        year_new = [srt + i for i in year]
        name_year = assign_year(s, placement, year_new)
        name_year = [i.replace(srt, '') for i in name_year]
        tmp = [[name_year[i], names[i], placement[i]]
               for i in range(len(name_year))]
        tmp = pd.DataFrame(tmp)
        tmp.columns = ['Year', 'Name', 'Placement']
        return(tmp)

    if len(names) == len(placement):
        srt, end = year_parser
        year = find_pattern(s, srt, end)
        year_new = [srt + i for i in year]
        name_year = assign_year(s, names, year_new)
        name_year = [i.replace(srt, '') for i in name_year]
        tmp = [[name_year[i], names[i], placement[i]]
               for i in range(len(name_year))]
        tmp = pd.DataFrame(tmp)
        tmp.columns = ['Year', 'Name', 'Placement']
        return(tmp)
    else:
        return([placement, names])


def fast_parse_2(html, name_parser, placement_parser, year_parser):
    """Like fast_parse_1, but expects one year entry per name/placement
    (no offset-based year assignment)."""
    # read the content of the placement website
    raw_html = simple_get(html)
    html = BeautifulSoup(raw_html, 'html.parser')

    # store the year and placement info
    names = []
    year = []
    placement = []

    s = str(html)

    srt, end = placement_parser
    placement = find_pattern(s, srt, end)
    # print('Length of placement list: {}'.format(len(placement)))

    srt, end = name_parser
    names = find_pattern(s, srt, end)
    # print('Length of name list: {}'.format(len(names)))

    srt, end = year_parser
    year = find_pattern(s, srt, end)
    # print('Length of year list: {}'.format(len(year)))

    if len(names) == len(placement) and len(names) == len(year):
        tmp = [[year[i], names[i], placement[i]] for i in range(len(names))]
        tmp = pd.DataFrame(tmp)
        tmp.columns = ['Year', 'Name', 'Placement']
        return(tmp)
    else:
        # Lengths disagree: return the raw lists for manual inspection.
        return([placement, names, year])


def print_out_html(html):
    """Fetch a page and return its parsed HTML as a string (for debugging
    new parser patterns)."""
    raw_html = simple_get(html)
    html = BeautifulSoup(raw_html, 'html.parser')
    s = str(html)
    return(s)
# -

# For each university with a valid web scraping script, save the econ placement csv files.

# +
# acquire the file directory and all web scraping scripts
for (dirpath, dirnames, filenames) in os.walk('websracp_univ'):
    dr, fn_li = dirpath, filenames
    for fn in fn_li:
        # Each script defines a DataFrame named after the university
        # acronym, hence the dynamic exec() calls below.
        str_command = "exec(open('" + str(dirpath) + "//" + str(fn) + "').read())"
        exec(str_command)
        univ = fn.split('.')[0]
        print('Processing university: {}'.format(univ))
        str_command = str(univ) + "['Acronym'] = '" + str(univ) + "'"
        exec(str_command)
        str_command = str(univ) + ".to_csv('data_by_univ//" + str(univ) + ".csv', index = False)"
        exec(str_command)
# -

# Combine all saved csvs into one:

# +
for (dirpath, dirnames, filenames) in os.walk('data_by_univ'):
    dr, fn_li = dirpath, filenames
    df = pd.DataFrame()
    for fn in fn_li:
        # exec() is unnecessary here: the target variable name is fixed.
        df1 = pd.read_csv(str(dirpath) + "//" + str(fn))
        # DataFrame.append was removed in pandas 2.0; concat is the
        # supported way to stack frames.
        df = pd.concat([df, df1])

print('Number of obs: {}'.format(len(df)))
# -

# Save the combined placement dataset:

# +
add_date = True

if add_date:
    filename = 'data//data' + datetime.today().strftime('%Y-%m-%d') + '.csv'
else:
    filename = "data//data.csv"

df.to_csv(filename, index=False)
# -

# This is a list of universities contained in our sample:

df['Acronym'].unique()
RunAll.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Module 2: Scraping with Selenium
# ## LATAM Airlines
# <a href="https://www.latam.com/es_ar/"><img src="https://i.pinimg.com/originals/dd/52/74/dd5274702d1382d696caeb6e0f6980c5.png" width="420"></img></a>
# <br>
#
# We will scrape the LATAM site to look up flight data given the origin and destination, date and cabin class. The information we expect to obtain for each flight is:
# - Available price(s)
# - Departure and arrival times (duration)
# - Stopover information
#
# Let's get started!

# Search URL with origin/destination/date/cabin pre-filled as query parameters.
url = 'https://www.latam.com/es_ar/apps/personas/booking?fecha1_dia=20&fecha1_anomes=2019-12&auAvailability=1&ida_vuelta=ida&vuelos_origen=Buenos%20Aires&from_city1=BUE&vuelos_destino=Madrid&to_city1=MAD&flex=1&vuelos_fecha_salida_ddmmaaaa=20/12/2019&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo='

from selenium import webdriver

# NOTE(review): find_element(s)_by_xpath and `executable_path` are the
# Selenium 3 API; Selenium 4 uses driver.find_elements(By.XPATH, ...) and
# a Service object — confirm the pinned selenium version before upgrading.
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
driver = webdriver.Chrome(executable_path='../../chromedriver', options=options)
driver.get(url)

# Use XPath to obtain the list of flights
vuelos = driver.find_elements_by_xpath('//li[@class="flight"]')

# Inspect the first flight of the results list.
vuelo = vuelos[0]

# Obtain the departure time, arrival time and duration of the flight

# Departure time
vuelo.find_element_by_xpath('.//div[@class="departure"]/time').get_attribute('datetime')

# Arrival time
vuelo.find_element_by_xpath('.//div[@class="arrival"]/time').get_attribute('datetime')

# Flight duration
vuelo.find_element_by_xpath('.//span[@class="duration"]/time').get_attribute('datetime')

# # Class 3
# In this class we will see how to obtain the stopover information for each flight. Let's go to the page and see where that data is located.<br>
# To display that information, we must click on a button. Let's select it:

boton_escalas = vuelo.find_element_by_xpath('.//div[@class="flight-summary-stops-description"]/button')
boton_escalas

# Once the element is located we can click on it
boton_escalas.click()

# And we see how the information we are looking for unfolds. **Note that the page HTML changed when that button was clicked**

# From each segment we need:
# - origin
# - departure time
# - destination
# - arrival time
# - flight duration
# - flight number
# - aircraft model
# - layover duration

segmentos = vuelo.find_elements_by_xpath('//div[@class="segments-graph"]/div[@class="segments-graph-segment"]')
segmentos

# Close the window with the segment information and run the previous cell again to see that it returns an empty list

# The number of stopovers is the number of segments minus 1.
escalas = len(segmentos) - 1  # 0 stopovers if it is a direct flight

# That's it for this class, in which we saw how to use Selenium to interact with web page elements to obtain information that was "hidden"

driver.close()
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases_old/Mรณdulo 3_ Scraping con Selenium/M3C3. Interactuando con los elementos - Script.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font color=Black>TRIP ADVISER Review Analysis </font> # ## Data Source # As part of this analysis, we analyze the Hotel reviews from **Tripadviser**. we could find the [Trip Adviser dataset here](https://github.com/kavgan/OpinRank). This analysis is focused to **Text Graph** technique. # + # Required Packages from collections import Counter, defaultdict from scipy.sparse import lil_matrix, spmatrix, csr_matrix, save_npz, load_npz import matplotlib.pyplot as plt import numpy as np import string import os import re import pandas as pd import sys from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer import re from gensim.models import Word2Vec import gensim from gensim import corpora from gensim.utils import simple_preprocess from gensim.parsing.preprocessing import STOPWORDS from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.stem.porter import * import nltk import datetime np.random.seed(2018) # - #Selecting cities and data path DataPath="E:\\Techi\\Courses\\Masters in Data Science\\Courses\\Text Visualization\\Data sets\\Trip Advisor\\hotels\\data\\" Cities=['new-delhi','beijing', 'chicago', 'dubai', 'las-vegas', 'london', 'montreal', 'new-york-city', 'san-francisco', 'shanghai'] Stopwords=["nan","quot","amp","a", "about", "above", "after", "again", "against", "ain", "all", "am", "an", "and", "any", "are", "aren", "aren't", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "can", "couldn", "couldn't", "d", "did", "didn", "didn't", "do", "does", "doesn", "doesn't", "doing", "don", "don't", "down", "during", "each", "few", "for", "from", "further", "had", "hadn", "hadn't", "has", "hasn", "hasn't", "have", "haven", "haven't", "having", "he", "her", "here", "hers", 
"herself", "him", "himself", "his", "how", "i", "if", "in", "into", "is", "isn", "isn't", "it", "it's", "its", "itself", "just", "ll", "m", "ma", "me", "mightn", "mightn't", "more", "most", "mustn", "mustn't", "my", "myself", "needn", "needn't", "no", "nor", "not", "now", "o", "of", "off", "on", "once", "only", "or", "other", "our", "ours", "ourselves", "out", "over", "own", "re", "s", "same", "shan", "shan't", "she", "she's", "should", "should've", "shouldn", "shouldn't", "so", "some", "such", "t", "than", "that", "that'll", "the", "their", "theirs", "them", "themselves", "then", "there", "these", "they", "this", "those", "through", "to", "too", "under", "until", "up", "ve", "very", "was", "wasn", "wasn't", "we", "were", "weren", "weren't", "what", "when", "where", "which", "while", "who", "whom", "why", "will", "with", "won", "won't", "wouldn", "wouldn't", "y", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves", "could", "he'd", "he'll", "he's", "here's", "how's", "i'd", "i'll", "i'm", "i've", "let's", "ought", "she'd", "she'll", "that's", "there's", "they'd", "they'll", "they're", "they've", "we'd", "we'll", "we're", "we've", "what's", "when's", "where's", "who's", "why's", "would", "able", "abst", "accordance", "according", "accordingly", "across", "act", "actually", "added", "adj", "affected", "affecting", "affects", "afterwards", "ah", "almost", "alone", "along", "already", "also", "although", "always", "among", "amongst", "announce", "another", "anybody", "anyhow", "anymore", "anyone", "anything", "anyway", "anyways", "anywhere", "apparently", "approximately", "arent", "arise", "around", "aside", "ask", "asking", "auth", "available", "away", "awfully", "b", "back", "became", "become", "becomes", "becoming", "beforehand", "begin", "beginning", "beginnings", "begins", "behind", "believe", "beside", "besides", "beyond", "biol", "brief", "briefly", "c", "ca", "came", "cannot", "can't", "cause", "causes", "certain", 
"certainly", "co", "com", "come", "comes", "contain", "containing", "contains", "couldnt", "date", "different", "done", "downwards", "due", "e", "ed", "edu", "effect", "eg", "eight", "eighty", "either", "else", "elsewhere", "end", "ending", "enough", "especially", "et", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere", "ex", "except", "f", "far", "ff", "fifth", "first", "five", "fix", "followed", "following", "follows", "former", "formerly", "forth", "found", "four", "furthermore", "g", "gave", "get", "gets", "getting", "give", "given", "gives", "giving", "go", "goes", "gone", "got", "gotten", "h", "happens", "hardly", "hed", "hence", "hereafter", "hereby", "herein", "heres", "hereupon", "hes", "hi", "hid", "hither", "home", "howbeit", "however", "hundred", "id", "ie", "im", "immediate", "immediately", "importance", "important", "inc", "indeed", "index", "information", "instead", "invention", "inward", "itd", "it'll", "j", "k", "keep", "keeps", "kept", "kg", "km", "know", "known", "knows", "l", "largely", "last", "lately", "later", "latter", "latterly", "least", "less", "lest", "let", "lets", "like", "liked", "likely", "line", "little", "'ll", "look", "looking", "looks", "ltd", "made", "mainly", "make", "makes", "many", "may", "maybe", "mean", "means", "meantime", "meanwhile", "merely", "mg", "might", "million", "miss", "ml", "moreover", "mostly", "mr", "mrs", "much", "mug", "must", "n", "na", "name", "namely", "nay", "nd", "near", "nearly", "necessarily", "necessary", "need", "needs", "neither", "never", "nevertheless", "new", "next", "nine", "ninety", "nobody", "non", "none", "nonetheless", "noone", "normally", "nos", "noted", "nothing", "nowhere", "obtain", "obtained", "obviously", "often", "oh", "ok", "okay", "old", "omitted", "one", "ones", "onto", "ord", "others", "otherwise", "outside", "overall", "owing", "p", "page", "pages", "part", "particular", "particularly", "past", "per", "perhaps", "placed", "please", "plus", 
"poorly", "possible", "possibly", "potentially", "pp", "predominantly", "present", "previously", "primarily", "probably", "promptly", "proud", "provides", "put", "q", "que", "quickly", "quite", "qv", "r", "ran", "rather", "rd", "readily", "really", "recent", "recently", "ref", "refs", "regarding", "regardless", "regards", "related", "relatively", "research", "respectively", "resulted", "resulting", "results", "right", "run", "said", "saw", "say", "saying", "says", "sec", "section", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sent", "seven", "several", "shall", "shed", "shes", "show", "showed", "shown", "showns", "shows", "significant", "significantly", "similar", "similarly", "since", "six", "slightly", "somebody", "somehow", "someone", "somethan", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specifically", "specified", "specify", "specifying", "still", "stop", "strongly", "sub", "substantially", "successfully", "sufficiently", "suggest", "sup", "sure", "take", "taken", "taking", "tell", "tends", "th", "thank", "thanks", "thanx", "thats", "that've", "thence", "thereafter", "thereby", "thered", "therefore", "therein", "there'll", "thereof", "therere", "theres", "thereto", "thereupon", "there've", "theyd", "theyre", "think", "thou", "though", "thoughh", "thousand", "throug", "throughout", "thru", "thus", "til", "tip", "together", "took", "toward", "towards", "tried", "tries", "truly", "try", "trying", "ts", "twice", "two", "u", "un", "unfortunately", "unless", "unlike", "unlikely", "unto", "upon", "ups", "us", "use", "used", "useful", "usefully", "usefulness", "uses", "using", "usually", "v", "value", "various", "'ve", "via", "viz", "vol", "vols", "vs", "w", "want", "wants", "wasnt", "way", "wed", "welcome", "went", "werent", "whatever", "what'll", "whats", "whence", "whenever", "whereafter", "whereas", "whereby", "wherein", "wheres", "whereupon", "wherever", "whether", "whim", "whither", 
"whod", "whoever", "whole", "who'll", "whomever", "whos", "whose", "widely", "willing", "wish", "within", "without", "wont", "words", "world", "wouldnt", "www", "x", "yes", "yet", "youd", "youre", "z", "zero", "a's", "ain't", "allow", "allows", "apart", "appear", "appreciate", "appropriate", "associated", "best", "better", "c'mon", "c's", "cant", "changes", "clearly", "concerning", "consequently", "consider", "considering", "corresponding", "course", "currently", "definitely", "described", "despite", "entirely", "exactly", "example", "going", "greetings", "hello", "help", "hopefully", "ignored", "inasmuch", "indicate", "indicated", "indicates", "inner", "insofar", "it'd", "keep", "keeps", "novel", "presumably", "reasonably", "second", "secondly", "sensible", "serious", "seriously", "sure", "t's", "third", "thorough", "thoroughly", "three", "well", "wonder", "a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also", "although", "always", "am", "among", "amongst", "amoungst", "amount", "an", "and", "another", "any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven", "else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", 
"herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", 
"Z", "co", "op", "research-articl", "pagecount", "cit", "ibid", "les", "le", "au", "que", "est", "pas", "vol", "el", "los", "pp", "u201d", "well-b", "http", "volumtype", "par", "0o", "0s", "3a", "3b", "3d", "6b", "6o", "a1", "a2", "a3", "a4", "ab", "ac", "ad", "ae", "af", "ag", "aj", "al", "an", "ao", "ap", "ar", "av", "aw", "ax", "ay", "az", "b1", "b2", "b3", "ba", "bc", "bd", "be", "bi", "bj", "bk", "bl", "bn", "bp", "br", "bs", "bt", "bu", "bx", "c1", "c2", "c3", "cc", "cd", "ce", "cf", "cg", "ch", "ci", "cj", "cl", "cm", "cn", "cp", "cq", "cr", "cs", "ct", "cu", "cv", "cx", "cy", "cz", "d2", "da", "dc", "dd", "de", "df", "di", "dj", "dk", "dl", "do", "dp", "dr", "ds", "dt", "du", "dx", "dy", "e2", "e3", "ea", "ec", "ed", "ee", "ef", "ei", "ej", "el", "em", "en", "eo", "ep", "eq", "er", "es", "et", "eu", "ev", "ex", "ey", "f2", "fa", "fc", "ff", "fi", "fj", "fl", "fn", "fo", "fr", "fs", "ft", "fu", "fy", "ga", "ge", "gi", "gj", "gl", "go", "gr", "gs", "gy", "h2", "h3", "hh", "hi", "hj", "ho", "hr", "hs", "hu", "hy", "i", "i2", "i3", "i4", "i6", "i7", "i8", "ia", "ib", "ic", "ie", "ig", "ih", "ii", "ij", "il", "in", "io", "ip", "iq", "ir", "iv", "ix", "iy", "iz", "jj", "jr", "js", "jt", "ju", "ke", "kg", "kj", "km", "ko", "l2", "la", "lb", "lc", "lf", "lj", "ln", "lo", "lr", "ls", "lt", "m2", "ml", "mn", "mo", "ms", "mt", "mu", "n2", "nc", "nd", "ne", "ng", "ni", "nj", "nl", "nn", "nr", "ns", "nt", "ny", "oa", "ob", "oc", "od", "of", "og", "oi", "oj", "ol", "om", "on", "oo", "oq", "or", "os", "ot", "ou", "ow", "ox", "oz", "p1", "p2", "p3", "pc", "pd", "pe", "pf", "ph", "pi", "pj", "pk", "pl", "pm", "pn", "po", "pq", "pr", "ps", "pt", "pu", "py", "qj", "qu", "r2", "ra", "rc", "rd", "rf", "rh", "ri", "rj", "rl", "rm", "rn", "ro", "rq", "rr", "rs", "rt", "ru", "rv", "ry", "s2", "sa", "sc", "sd", "se", "sf", "si", "sj", "sl", "sm", "sn", "sp", "sq", "sr", "ss", "st", "sy", "sz", "t1", "t2", "t3", "tb", "tc", "td", "te", "tf", "th", "ti", "tj", "tl", "tm", "tn", "tp", 
"tq", "tr", "ts", "tt", "tv", "tx", "ue", "ui", "uj", "uk", "um", "un", "uo", "ur", "ut", "va", "wa", "vd", "wi", "vj", "vo", "wo", "vq", "vt", "vu", "x1", "x2", "x3", "xf", "xi", "xj", "xk", "xl", "xn", "xo", "xs", "xt", "xv", "xx", "y2", "yj", "yl", "yr", "ys", "yt", "zi", "zz"] Custom_Stopwords=["nan","quot","hotel","room"] # ## Data Retrivel and Processing # # Below code collect reviews from all files and append it. Dataset=pd.read_csv(DataPath+"text.txt", names=['Date','Title','Content'], delimiter="\t", index_col=3, encoding = "ISO-8859-1") Dataset["City"]="Test" Dataset.reset_index() for city in Cities : print("Started : "+city) for i in os.listdir(DataPath+city+"\\"): try : Dataset2=pd.read_csv(DataPath+city+"\\"+i, names=['Date','Title','Content'], delimiter="\t", index_col=3, encoding = "ISO-8859-1") Dataset2.reset_index() Dataset2["City"]=city Dataset2["Hotel"]=i Dataset=pd.concat([Dataset,Dataset2],ignore_index=True,axis=0, sort=True) except : print(city+"\\"+i+" file is corrupted") print("Completed : "+city) # ## Extracting date features Dataset["Date"]=pd.to_datetime(Dataset["Date"]) Dataset.index=Dataset.Date Dataset["Year"]=Dataset.index.year Dataset["Month"]=Dataset.index.month # ## Extracting hotel ratings city_review=pd.read_csv(DataPath+"city_text.txt", header=0 , delimiter="\t",encoding = "ISO-8859-1") for city in Cities : try : city_review2=pd.read_csv(DataPath+city+".csv",header=0 , delimiter=",", encoding = "ISO-8859-1",index_col=False) city_review=pd.concat([city_review,city_review2],ignore_index=True,axis=0, sort=True) except : print(city+"\\"+i+" file is corrupted") city_review['overall_ratingsource']=city_review['overall_ratingsource'].replace(-1,np.NaN) #city_review[['doc_id','country','overall_ratingsource']] # ## Data Preprocess Function # + # Taken from Gensim preparators RE_PUNCT = re.compile(r'([%s])+' % re.escape(string.punctuation), re.UNICODE) RE_NUMERIC = re.compile(r"[0-9]+", re.UNICODE) RE_NONALPHA = re.compile(r"\W", 
re.UNICODE) RE_WHITESPACE = re.compile(r"(\s)+", re.UNICODE) MIN_LENGTH = 3 def normalise(s): if s is None or s is np.nan: return "" s = s.lower() s = RE_PUNCT.sub(" ", s) s = RE_WHITESPACE.sub(" ", s) s = RE_NUMERIC.sub("", s) s = ' '.join([w for w in s.split() if len(w)>=MIN_LENGTH]) s = RE_NONALPHA.sub(" ", s) return s #1 for skip-gram; otherwise CBOW. def create_model(Sentences2, Model_Name, sg1=0): print('Creating the model is starting : '+str(datetime.datetime.now())) model_review = Word2Vec(sentences=list(pd.Series(Sentences2).dropna()) , size=100, window=5, min_count=1, workers=4, sg=sg1) print('Creating the model is completed : '+str(datetime.datetime.now())) if sg1==0 : model_review.save("model\\"+Model_Name+"_"+"word2vec.model") else : model_review.save("model\\"+Model_Name+"_"+"word2vec_Skipgram.model") print('Model is Saved : '+str(datetime.datetime.now())) return model_review def load_model_file(modelfilename, list_of_words, sg1=0): if sg1==0: model= Word2Vec.load("model\\"+modelfilename+"_"+"word2vec.model") else : model= Word2Vec.load("model\\"+modelfilename+"_"+"word2vec_Skipgram.model") word1=[] year=[] s_word=[] s_word_rate=[] for x in list_of_words: try : for sw in model.wv.similar_by_word(x) : word1.append(x) year.append(np.nan) s_word.append(sw[0]) s_word_rate.append(sw[1]) except : pass similiar_item=pd.DataFrame({"word": word1, "s_word":s_word, "s_word_rate":s_word_rate}) if sg1==0: similiar_item["Type"]=modelfilename else : similiar_item["Type"]=modelfilename+"_sg" return similiar_item sno = nltk.stem.SnowballStemmer('english') # Creating the object for LDA model using gensim library Lda = gensim.models.ldamodel.LdaModel def preprocess(text): result = [] for token in gensim.utils.simple_preprocess(text): if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3: result.append(sno.stem(token)) return result def create_sentence(rule): print('Reading the sentence Started : '+str(datetime.datetime.now())) # 
print(len(Dataset["Content"][rule])) Sentences=[] for review in Dataset["Content"][rule]: if pd.isnull(review)== False and review!='': tokens = preprocess(review) tokens=[x for x in tokens if x not in Custom_Stopwords] Sentences.append(tokens) #print(tokens) print('Reading the sentence Completed : '+str(datetime.datetime.now())) print('Total Sentences '+str(len(Sentences))) return Sentences def corpora_dict_model(sentence1): dictionary = corpora.Dictionary(sentence1) doc_term_matrix = [dictionary.doc2bow(doc) for doc in sentence1] print('Creating the model is starting : '+str(datetime.datetime.now())) # Running and Trainign LDA model on the document term matrix. ldamodel = Lda(doc_term_matrix, num_topics=5, id2word = dictionary, #passes=50, random_state=100, update_every=1, chunksize=100, passes=10, alpha='auto', per_word_topics=True) print('Creating the model is completed : '+str(datetime.datetime.now())) return (doc_term_matrix,ldamodel) def samplesenteneces(Sentences1, limit=1000): if (len(Sentences1) >= limit) : print('Before restricting (size) : '+str(len(Sentences1))) Sentences1=list(pd.Series(Sentences1).dropna()[np.linspace(0,len(Sentences1),limit,dtype='int')]) print('After restricting (size) : '+str(len(Sentences1))) return list(pd.Series(Sentences1).dropna()) else : print('Sentence size already with in a limit (size) : '+str(len(Sentences1))) return list(pd.Series(Sentences1).dropna()) # + highlowrated_sentence=create_sentence( Dataset['Hotel'].isin(list(city_review[(city_review['overall_ratingsource']<=2) | (city_review['overall_ratingsource']>=4.7)]['doc_id'])) ) dictionary = corpora.Dictionary(highlowrated_sentence) doc_term_matrix = [dictionary.doc2bow(doc) for doc in highlowrated_sentence] print('Creating the model is starting : '+str(datetime.datetime.now())) # Running and Training LDA model on the document term matrix. 
# Train a 4-topic LDA model over the high/low-rated review corpus built above.
ldamodel = Lda(doc_term_matrix, num_topics=4, id2word = dictionary,
               #passes=50,
               random_state=100, update_every=1, chunksize=100, passes=10,
               alpha='auto', per_word_topics=True)
print('Creating the model is completed : '+str(datetime.datetime.now()))

# +
#Most common words
# Count token frequencies over all normalised review texts.
Dataset["NormalisedContent"] = Dataset["Content"].apply(normalise)
full_vocab = defaultdict(int)
i=0  # NOTE(review): this counter is incremented but never read
for review in Dataset["NormalisedContent"] :
    i=i+1
    if review!="":
        for token in review.split():
            full_vocab[token] += 1
list_of_impo_words=[]
full_vocab_c=Counter(full_vocab)
# -

# Keep the 50 most frequent tokens that are not generic English stopwords.
list_of_impo_words=[]
for (w,s) in full_vocab_c.most_common(50):
    if w not in Stopwords :
        list_of_impo_words.append(w)

# +
# Collect Word2Vec neighbours of the important words from each per-city
# skip-gram model (sg1=1) and stack them into one DataFrame.
similiars=load_model_file("delhi", list_of_impo_words, 1)
london_similiar=load_model_file("london", list_of_impo_words, 1)
similiars=pd.concat([similiars,london_similiar],ignore_index=True,axis=0, sort=True)
newyork_similiar=load_model_file("newyork", list_of_impo_words, 1)
similiars=pd.concat([similiars,newyork_similiar],ignore_index=True,axis=0, sort=True)
# -

# ### Pre-analyzed topic
#
# - Topic 0 : Sight seeing
# - Topic 1 : Hostel
# - Topic 2 : Food
# - Topic 3 : Staff Service

# Assign each graph word to an LDA topic label; words outside the LDA
# vocabulary fall back to topic 1 ("Hostel") in the except branch.
All_node_words=list(set(list(similiars["s_word"])+list_of_impo_words))
impo_wtopic_id=[]
impo_wtopic_name=[]
for wtopic in All_node_words:
    try :
        # With per_word_topics=True the model output's element [1] is the
        # per-word topic list; take the first topic id of the first word.
        topic_id=ldamodel[dictionary.doc2bow([wtopic])][1][0][1][0]
        if topic_id==0 :
            topic_name="Sight seeing"
        elif topic_id==1 :
            topic_name="Hostel"
        elif topic_id==2 :
            topic_name="Food"
        elif topic_id==3 :
            topic_name="Staff Service"
        impo_wtopic_id.append(topic_id)
        impo_wtopic_name.append(topic_name)
    except :
        impo_wtopic_id.append(1)
        impo_wtopic_name.append("Hostel")

# Gephi node table: one row per word with its topic label.
node=pd.DataFrame({ "id":All_node_words,"Label":All_node_words, "topic_name":impo_wtopic_name})
node.to_csv("node_topic.csv", index=False)
# Word -> topic lookup, joined onto the similarity pairs for edge export.
Topic_df=pd.DataFrame({ "word":All_node_words,"topic_id":impo_wtopic_id, "topic_name":impo_wtopic_name})
similiar_topic=similiars.merge(Topic_df, how="inner")
city="delhi_sg"
# Gephi edge table for the currently selected city model: each row links a
# seed word (Source) to one of its Word2Vec-similar words (Target), weighted
# by the similarity score.
city_rows = similiar_topic[similiar_topic["Type"] == city]
Edge = pd.DataFrame({
    "Source": city_rows["word"],
    "Target": city_rows["s_word"],
    "Type": "Undirected",
    "Weight": city_rows["s_word_rate"],
    "Average Degree": "1",
})
Edge.to_csv("Edge_" + city + ".csv")
Text Graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # String Literals # String literals in python are surrounded by either single quotation marks, or double quotation marks. # # 'hello' is the same as "hello". print("Hello") print('Hello') # # Assign String to a Variable # Assigning a string to a variable is done with the variable name followed by an equal sign and the string: a = "Hello" print(a) # # Multiline Strings # You can assign a multiline string to a variable by using three quotes: a = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.""" print(a) a = '''Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.''' print(a) # # Strings are Arrays # Like many other popular programming languages, strings in Python are arrays of bytes representing unicode characters. # # However, Python does not have a character data type, a single character is simply a string with a length of 1. # # Square brackets can be used to access elements of the string. a = "Hello, World!" print(a[1]) # # Slicing # You can return a range of characters by using the slice syntax. # # Specify the start index and the end index, separated by a colon, to return a part of the string. b = "Hello, World!" print(b[2:5]) b = "Hello, World!" print(b[:2]) b = "Hello, World!" print(b[2:]) # # Negative Indexing # Use negative indexes to start the slice from the end of the string: b = "Hello, World!" print(b[-5:-2]) # # String Length # To get the length of a string, use the len() function. a = "Hello, World!" print(len(a)) # # Check String # To check if a certain phrase or character is present in a string, we can use the keywords in or not in. 
txt = "The rain in Spain stays mainly in the plain" x = "ain" in txt print(x) txt = "The rain in Spain stays mainly in the plain" x = "ain" not in txt print(x) # # String Concatenation # To concatenate, or combine, two strings you can use the + operator. a = "Hello" b = "World" c = a + ' ' + b print(c) # # String Format # As we learned in the Python Variables chapter, we cannot combine strings and numbers like this: age = 36 txt = "My name is John, and I am {}" print(txt.format(age)) quantity = 3 itemno = 567 price = 49.95 myorder = "I want {} pieces of item {} for {} dollars." print(myorder.format(quantity, itemno, price)) quantity = 3 itemno = 567 price = 49.95 myorder = "I want to pay {2} dollars for {0} pieces of item {1}." print(myorder.format(quantity, itemno, price)) # # Escape Character # To insert characters that are illegal in a string, use an escape character. # # An escape character is a backslash \ followed by the character you want to insert. # # An example of an illegal character is a double quote inside a string that is surrounded by double quotes: txt = "We are the so-called \"Vikings\" from the north." print(txt) # # String Methods # Python has a set of built-in methods that you can use on strings. a = " Hello, World! " print(a.strip()) # returns "Hello, World!" a = "Hello, World!" print(a.lower()) a = "Hello, World!" print(a.upper()) a = "Hello, World!" print(a.replace("H", "J")) a = "Hello, World!" print(a.split(",")) # returns ['Hello', ' World!'] # # Python Collections (Arrays) # There are four collection data types in the Python programming language: # # * List is a collection which is ordered and changeable. Allows duplicate members. # * Tuple is a collection which is ordered and unchangeable. Allows duplicate members. # * Set is a collection which is unordered and unindexed. No duplicate members. # * Dictionary is a collection which is unordered, changeable and indexed. No duplicate members. 
# # When choosing a collection type, it is useful to understand the properties of that type. Choosing the right type for a particular data set could mean retention of meaning, and, it could mean an increase in efficiency or security. # # List # Lists are used to store multiple items in a single variable. # # Lists are one of 4 built-in data types in Python used to store collections of data, the other 3 are Tuple, Set, and Dictionary, all with different qualities and usage. # # Lists are created using square brackets: thislist = ["apple", "banana", "cherry"] print(thislist) # # List Items # List items are ordered, changeable, and allow duplicate values. # # List items are indexed, the first item has index [0], the second item has index [1] etc. # # # Ordered # When we say that lists are ordered, it means that the items have a defined order, and that order will not change. # # If you add new items to a list, the new items will be placed at the end of the list. # # Changeable # The list is changeable, meaning that we can change, add, and remove items in a list after it has been created. # # Allow Duplicates # Since lists are indexed, lists can have items with the same value: thislist = ["apple", "banana", "cherry", "apple", "cherry"] print(thislist) # # List Length # To determine how many items a list has, use the len() function: thislist = ["apple", "banana", "cherry"] print(len(thislist)) # # List Items - Data Types # List items can be of any data type: list1 = ["apple", "banana", "cherry"] list2 = [1, 5, 7, 9, 3] list3 = [True, False, False] list1 = ["abc", 34, True, 40, "male"] # # type() # From Python's perspective, lists are defined as objects with the data type 'list': mylist = ["apple", "banana", "cherry"] print(type(mylist)) # # The list() Constructor # It is also possible to use the list() constructor when creating a new list. 
thislist = list(("apple", "banana", "cherry")) # note the double round-brackets print(thislist) # # Access Items # List items are indexed and you can access them by referring to the index number: # # > **_NOTE:_** The first item has index 0. thislist = ["apple", "banana", "cherry"] print(thislist[1]) # # Negative Indexing # Negative indexing means start from the end # # -1 refers to the last item, -2 refers to the second last item etc. thislist = ["apple", "banana", "cherry"] print(thislist[-1]) # # Range of Indexes # You can specify a range of indexes by specifying where to start and where to end the range. # # When specifying a range, the return value will be a new list with the specified items. thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"] print(thislist[2:5]) # > **_Note:_** The search will start at index 2 (included) and end at index 5 (not included). # + #By leaving out the start value, the range will start at the first item: thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"] print(thislist[:4]) # + #By leaving out the end value, the range will go on to the end of the list: thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"] print(thislist[2:]) # - # # Change Item Value # To change the value of a specific item, refer to the index number: thislist = ["apple", "banana", "cherry"] thislist[1] = "blackcurrant" print(thislist) # To insert more than one item, create a list with the new values, # and specify the index number where you want the new values to be inserted: thislist = ["apple", "banana", "cherry"] thislist[1] = ["blackcurrant", "watermelon"] print(thislist) # # Change a Range of Item Values # To change the value of items within a specific range, define a list with the new values, and refer to the range of index numbers where you want to insert the new values: thislist = ["apple", "banana", "cherry", "orange", "kiwi", "mango"] thislist[1:3] = ["blackcurrant", "watermelon"] 
print(thislist) # # Insert Items # To insert a new list item, without replacing any of the existing values, we can use the insert() method. # # The insert() method inserts an item at the specified index: thislist = ["apple", "banana", "cherry"] thislist.insert(2, "watermelon") print(thislist) # # Append Items # To add an item to the end of the list, use the append() method: thislist = ["apple", "banana", "cherry"] thislist.append("orange") print(thislist) # # Extend List # To append elements from another list to the current list, use the extend() method. thislist = ["apple", "banana", "cherry"] tropical = ["mango", "pineapple", "papaya"] thislist.extend(tropical) print(thislist) # # Add Any Iterable # The extend() method does not have to append lists, you can add any iterable object (tuples, sets, dictionaries etc.). thislist = ["apple", "banana", "cherry"] thistuple = ("kiwi", "orange") thislist.extend(thistuple) print(thislist) # # Remove Specified Item # The remove() method removes the specified item. thislist = ["apple", "banana", "cherry"] thislist.remove("banana") print(thislist) # # Remove Specified Index # The pop() method removes the specified index. # # > **_Note:_** If you do not specify the index, the pop() method removes the last item. thislist = ["apple", "banana", "cherry"] thislist.pop(1) print(thislist) # The del keyword also removes the specified index: thislist = ["apple", "banana", "cherry"] del thislist[0] print(thislist) # The del keyword can also delete the list completely. thislist = ["apple", "banana", "cherry"] del thislist # # Clear the List # The clear() method empties the list. # # The list still remains, but it has no content. 
thislist = ["apple", "banana", "cherry"] thislist.clear() print(thislist) # # Loop Through a List # You can loop through the list items by using a for loop: thislist = ["apple", "banana", "cherry"] for x in thislist: print(x) # # Loop Through the Index Numbers # You can also loop through the list items by referring to their index number. # # Use the range() and len() functions to create a suitable iterable. thislist = ["apple", "banana", "cherry"] for i in range(len(thislist)): print(thislist[i]) thislist = ["apple", "banana", "cherry"] for i in enumerate(thislist): print(i) # # Sort List Alphanumerically # List objects have a sort() method that will sort the list alphanumerically, ascending, by default: thislist = ["orange", "mango", "kiwi", "pineapple", "banana"] thislist.sort() print(thislist) # + # Sort the list numerically: thislist = [100, 50, 65, 82, 23] thislist.sort() print(thislist) # - # # Reverse Order # What if you want to reverse the order of a list, regardless of the alphabet? # # The reverse() method reverses the current sorting order of the elements. thislist = ["banana", "Orange", "Kiwi", "cherry"] thislist.reverse() print(thislist) # # Copy a List # You cannot copy a list simply by typing list2 = list1, because: list2 will only be a reference to list1, and changes made in list1 will automatically also be made in list2. # # There are ways to make a copy, one way is to use the built-in List method copy(). # + thislist = ["apple", "banana", "cherry"] thislist2 = thislist print(thislist) print(thislist2) # - del thislist[0] print(thislist) print(thislist2) print(id(thislist)) print(id(thislist2)) thislist = ["apple", "banana", "cherry"] mylist = thislist.copy() print(mylist) print(id(thislist)) print(id(mylist)) thislist = ["apple", "banana", "cherry"] mylist = list(thislist) print(mylist) # # Join Two Lists # There are several ways to join, or concatenate, two or more lists in Python. # # One of the easiest ways are by using the + operator. 
# + list1 = ["a", "b", "c"] list2 = [1, 2, 3] list3 = list1 + list2 print(list3) # + # Another way to join two lists are by appending all the items from list2 into list1, one by one: list1 = ["a", "b" , "c"] list2 = [1, 2, 3] for x in list2: list1.append(x) print(list1) # + # Or you can use the extend() method, which purpose is to add elements from one list to another list: list1 = ["a", "b" , "c"] list2 = [1, 2, 3] list1.extend(list2) print(list1) # - # # Tuple # Tuples are used to store multiple items in a single variable. # # Tuple is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, Set, and Dictionary, all with different qualities and usage. # # A tuple is a collection which is ordered and **unchangeable**. # # Tuples are written with round brackets. thistuple = ("apple", "banana", "cherry") print(thistuple) # # Tuple Items # Tuple items are ordered, changeable, and allow duplicate values. # # Tuple items are indexed, the first item has index [0], the second item has index [1] etc. # # # # Ordered # When we say that tuples are ordered, it means that the items have a defined order, and that order will not change. # # Unchangeable # Tuples are unchangeable, meaning that we cannot change, add or remove items after the tuple has been created. # # Allow Duplicates # Since tuple are indexed, tuples can have items with the same value: thistuple = ("apple", "banana", "cherry", "apple", "cherry") print(thistuple) # # Tuple Length # To determine how many items a tuple has, use the len() function: thistuple = ("apple", "banana", "cherry") print(len(thistuple)) # # Create Tuple With One Item # To create a tuple with only one item, you have to add a comma after the item, otherwise Python will not recognize it as a tuple. 
# # # + thistuple = ("apple",) print(type(thistuple)) #NOT a tuple thistuple = ("apple") print(type(thistuple)) # - # # Tuple Items - Data Types # Tuple items can be of any data type: tuple1 = ("apple", "banana", "cherry") tuple2 = (1, 5, 7, 9, 3) tuple3 = (True, False, False) tuple1 = ("abc", 34, True, 40, "male") # # type() # From Python's perspective, tuples are defined as objects with the data type 'tuple': mytuple = ("apple", "banana", "cherry") print(type(mytuple)) # # The tuple() Constructor # It is also possible to use the tuple() constructor to make a tuple. thistuple = tuple(("apple", "banana", "cherry")) # note the double round-brackets print(thistuple) # # Access Tuple Items # You can access tuple items by referring to the index number, inside square brackets: # # > **_Note:_** The first item has index 0. thistuple = ("apple", "banana", "cherry") print(thistuple[1]) # # Negative Indexing # Negative indexing means start from the end. # # -1 refers to the last item, -2 refers to the second last item etc. thistuple = ("apple", "banana", "cherry") print(thistuple[-1]) # # Range of Indexes # You can specify a range of indexes by specifying where to start and where to end the range. # # When specifying a range, the return value will be a new tuple with the specified items. # # > **_Note:_** The search will start at index 2 (included) and end at index 5 (not included). 
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango") print(thistuple[2:5]) # + # By leaving out the start value, the range will start at the first item: thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango") print(thistuple[:4]) # + # By leaving out the end value, the range will go on to the end of the list: thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango") print(thistuple[2:]) # - # # Range of Negative Indexes # Specify negative indexes if you want to start the search from the end of the tuple: thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango") print(thistuple[-4:-1]) # # Check if Item Exists # To determine if a specified item is present in a tuple use the in keyword: thistuple = ("apple", "banana", "cherry") if "apple" in thistuple: print("Yes, 'apple' is in the fruits tuple") # # Update Tuples # Tuples are unchangeable, meaing that you cannot change, add, or remove items once the tuple is created. # # But there are some workarounds. # # Change Tuple Values # Once a tuple is created, you cannot change its values. Tuples are unchangeable, or immutable as it also is called. # # But there is a workaround. You can convert the tuple into a list, change the list, and convert the list back into a tuple. # + x = ("apple", "banana", "cherry") y = list(x) y[1] = "kiwi" x = tuple(y) print(x) # - # # Add Items # Once a tuple is created, you cannot add items to it. # + active="" # thistuple = ("apple", "banana", "cherry") # thistuple.append("orange") # This will raise an error # print(thistuple) # - # Just like the workaround for changing a tuple, you can convert it into a list, add your item(s), and convert it back into a tuple. thistuple = ("apple", "banana", "cherry") y = list(thistuple) y.append("orange") thistuple = tuple(y) print(thistuple) # # Remove Items # > **_Note:_** You cannot remove items in a tuple. 
# # Tuples are unchangeable, so you cannot remove items from it, but you can use the same workaround as we used for changing and adding tuple items: thistuple = ("apple", "banana", "cherry") y = list(thistuple) y.remove("apple") thistuple = tuple(y) print(thistuple) # Or you can delete the tuple completely: thistuple = ("apple", "banana", "cherry") del thistuple print(thistuple) #this will raise an error because the tuple no longer exists # # Unpacking a Tuple # # When we create a tuple, we normally assign values to it. This is called "packing" a tuple: fruits = ("apple", "banana", "cherry") # But, in Python, we are also allowed to extract the values back into variables. This is called "unpacking": # # > **_Note:_** The number of variables must match the number of values in the tuple, if not, you must use an asterix to collect the remaining values as a list. # + fruits = ("apple", "banana", "cherry") green, yellow, red = fruits print(green) print(yellow) print(red) # - # # Using Asterix* # If the number of variables is less than the number of values, you can add an * to the variable name and the values will be assigned to the variable as a list: # + fruits = ("apple", "banana", "cherry", "strawberry", "raspberry") green, yellow, *red = fruits print(green) print(yellow) print(red) # - # If the asterix is added to another variable name than the last, Python will assign values to the variable until the number of values left matches the number of variables left. # + fruits = ("apple", "mango", "papaya", "pineapple", "cherry") (green, *tropic, red) = fruits print(green) print(tropic) print(red) # - # # Loop Through a Tuple # You can loop through the tuple items by using a for loop. thistuple = ("apple", "banana", "cherry") for x in thistuple: print(x) # # Loop Through the Index Numbers # You can also loop through the tuple items by referring to their index number. # # Use the range() and len() functions to create a suitable iterable. 
thistuple = ("apple", "banana", "cherry")
for i in range(len(thistuple)):
    print(thistuple[i])

# # Join Two Tuples
# To join two or more tuples you can use the + operator:

# +
tuple1 = ("a", "b" , "c")
tuple2 = (1, 2, 3)
tuple3 = tuple1 + tuple2
print(tuple3)
# -

# # Multiply Tuples
# If you want to multiply the content of a tuple a given number of times, you can use the * operator:

# +
fruits = ("apple", "banana", "cherry")
mytuple = fruits * 2
print(mytuple)
# -

# # Dictionary
# Dictionaries are used to store data values in key:value pairs.
#
# A dictionary is a collection which is unordered, changeable and does not allow duplicates.
#
# Dictionaries are written with curly brackets, and have keys and values:
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
print(thisdict)

# # Dictionary Items
# Dictionary items are unordered, changeable, and do not allow duplicates.
#
# Dictionary items are presented in key:value pairs, and can be referred to by using the key name.
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
print(thisdict["brand"])

# # Unordered
# When we say that dictionaries are unordered, it means that the items do not have a defined order; you cannot refer to an item by using an index.
#
# # Changeable
# Dictionaries are changeable, meaning that we can change, add or remove items after the dictionary has been created.
# # Duplicates Not Allowed # Dictionaries cannot have two items with the same key: # Duplicate values will overwrite existing values: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964, "year": 2020 } print(thisdict) # # Dictionary Length # To determine how many items a dictionary has, use the len() function: print(len(thisdict)) # # Dictionary Items - Data Types # The values in dictionary items can be of any data type: thisdict = { "brand": "Ford", "electric": False, "year": 1964, "colors": ["red", "white", "blue"] } # # type() # From Python's perspective, dictionaries are defined as objects with the data type 'dict': thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } print(type(thisdict)) # # Accessing Items # You can access the items of a dictionary by referring to its key name, inside square brackets: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } x = thisdict["model"] print(x) # + # There is also a method called get() that will give you the same result: x = thisdict.get("model") print(x) # - # # Get Keys # The keys() method will return a list of all the keys in the dictionary. # + #Get a list of the keys: x = thisdict.keys() print(x) # + # The list of the keys is a view of the dictionary, # meaning that any changes done to the dictionary will be reflected in the keys list. car = { "brand": "Ford", "model": "Mustang", "year": 1964 } x = car.keys() print(x) #before the change car["color"] = "white" print(x) #after the change # - # # Get Values # The values() method will return a list of all the values in the dictionary. x = thisdict.values() print(x) # + # The list of the values is a view of the dictionary, # meaning that any changes done to the dictionary will be reflected in the values list. 
car = { "brand": "Ford", "model": "Mustang", "year": 1964 } x = car.values() print(x) #before the change car["year"] = 2020 print(x) #after the change # - # # Get Items # The items() method will return each item in a dictionary, as tuples in a list. x = thisdict.items() print(x) # + # The returned list is a view of the items of the dictionary, # meaning that any changes done to the dictionary will be reflected in the items list. car = { "brand": "Ford", "model": "Mustang", "year": 1964 } x = car.items() print(x) #before the change car["year"] = 2020 print(x) #after the change # - # # Check if Key Exists # To determine if a specified key is present in a dictionary use the in keyword: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } if "model" in thisdict: print("Yes, 'model' is one of the keys in the thisdict dictionary") # # Change Values # You can change the value of a specific item by referring to its key name: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict["year"] = 2018 print(thisdict) # # Update Dictionary # The update() method will update the dictionary with the items from the given argument. # # The argument must be a dictionary, or an iterable object with key:value pairs. thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict.update({"year": 2020}) print(thisdict) # # Adding Items # Adding an item to the dictionary is done by using a new index key and assigning a value to it: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict["color"] = "red" print(thisdict) # # Update Dictionary # The update() method will update the dictionary with the items from a given argument. If the item does not exist, the item will be added. # # The argument must be a dictionary, or an iterable object with key:value pairs. 
thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict.update({"color": "red"}) print(thisdict) # # Removing Items # There are several methods to remove items from a dictionary: # + # The pop() method removes the item with the specified key name: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict.pop("model") print(thisdict) # + # The popitem() method removes the last inserted item (in versions before 3.7, a random item is removed instead): thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict.popitem() print(thisdict) # + # The del keyword removes the item with the specified key name: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } del thisdict["model"] print(thisdict) # + # The del keyword can also delete the dictionary completely: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } del thisdict # print(thisdict) #this will cause an error because "thisdict" no longer exists. # + # The clear() method empties the dictionary: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } thisdict.clear() print(thisdict) # - # # Loop Through a Dictionary # You can loop through a dictionary by using a for loop. # # When looping through a dictionary, the return value are the keys of the dictionary, but there are methods to return the values as well. 
# + # Print all key names in the dictionary, one by one: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } for x in thisdict: print(x) # + #Print all values in the dictionary, one by one: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } for x in thisdict: print(thisdict[x]) # + # You can also use the values() method to return values of a dictionary: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } for x in thisdict.values(): print(x) # + # You can use the keys() method to return the keys of a dictionary: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } for x in thisdict.keys(): print(x) # + # Loop through both keys and values, by using the items() method: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } for x, y in thisdict.items(): print(x, y) # - # # Copy a Dictionary # You cannot copy a dictionary simply by typing dict2 = dict1, because: dict2 will only be a reference to dict1, and changes made in dict1 will automatically also be made in dict2. # # There are ways to make a copy, one way is to use the built-in Dictionary method copy(). # + # Make a copy of a dictionary with the copy() method: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } mydict = thisdict.copy() print(mydict) # + # Make a copy of a dictionary with the dict() function: thisdict = { "brand": "Ford", "model": "Mustang", "year": 1964 } mydict = dict(thisdict) print(mydict) # - # # Nested Dictionaries # A dictionary can contain dictionaries, this is called nested dictionaries. 
# + # Create a dictionary that contain three dictionaries: myfamily = { "child1" : { "name" : "Emil", "year" : 2004 }, "child2" : { "name" : "Tobias", "year" : 2007 }, "child3" : { "name" : "Linus", "year" : 2011 } } print(myfamily) # + # Create three dictionaries, then create one dictionary that will contain the other three dictionaries: child1 = { "name" : "Emil", "year" : 2004 } child2 = { "name" : "Tobias", "year" : 2007 } child3 = { "name" : "Linus", "year" : 2011 } myfamily = { "child1" : child1, "child2" : child2, "child3" : child3 } print(myfamily) # - # # Set # Sets are used to store multiple items in a single variable. # # Set is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, Tuple, and Dictionary, all with different qualities and usage. # # A set is a collection which is both **unordered** and **unindexed**. # # Sets are written with curly brackets. # # > **_Note:_** Sets are unordered, so you cannot be sure in which order the items will appear. thisset = {"apple", "banana", "cherry"} print(thisset) # # Set Items # Set items are unordered, unchangeable, and do not allow duplicate values. # # Unordered # Unordered means that the items in a set do not have a defined order. # # Set items can appear in a different order every time you use them, and cannot be refferred to by index or key. # # Unchangeable # Sets are unchangeable, meaning that we cannot change the items after the set has been created. # # Once a set is created, you cannot change its items, but you can add new items. # # Duplicates Not Allowed # Sets cannot have two items with the same value. # + thisset = {"apple", "banana", "cherry", "apple"} print(thisset) # - # # Get the Length of a Set # To determine how many items a set has, use the len() method. 
# + thisset = {"apple", "banana", "cherry"} print(len(thisset)) # - # # Set Items - Data Types # Set items can be of any data type: set1 = {"apple", "banana", "cherry"} set2 = {1, 5, 7, 9, 3} set3 = {True, False, False} set1 = {"abc", 34, True, 40, "male"} # # type() # From Python's perspective, sets are defined as objects with the data type 'set': myset = {"apple", "banana", "cherry"} print(type(myset)) # # The set() Constructor # It is also possible to use the set() constructor to make a set. thisset = set(("apple", "banana", "cherry")) # note the double round-brackets print(thisset) # # Access Items # You cannot access items in a set by referring to an index or a key. # # But you can loop through the set items using a for loop, or ask if a specified value is present in a set, by using the in keyword. # + thisset = {"apple", "banana", "cherry"} for x in thisset: print(x) # + thisset = {"apple", "banana", "cherry"} print("banana" in thisset) # - # # Change Items # Once a set is created, you cannot change its items, but you can add new items. # # Add Items # Once a set is created, you cannot change its items, but you can add new items. # # To add one item to a set use the add() method. # # + thisset = {"apple", "banana", "cherry"} thisset.add("orange") print(thisset) # - # # Add Sets # To add items from another set into the current set, use the update() method. # + thisset = {"apple", "banana", "cherry"} tropical = {"pineapple", "mango", "papaya"} thisset.update(tropical) print(thisset) # - # # Add Any Iterable # The object in the update() method does not have be a set, it can be any iterable object (tuples, lists, dictionaries et,). # + thisset = {"apple", "banana", "cherry"} mylist = ["kiwi", "orange"] thisset.update(mylist) print(thisset) # - # # Remove Item # To remove an item in a set, use the remove(), or the discard() method. 
# + thisset = {"apple", "banana", "cherry"} thisset.remove("banana") print(thisset) # - # > **_Note:_** If the item to remove does not exist, remove() will raise an error. # + thisset = {"apple", "banana", "cherry"} thisset.discard("banana") print(thisset) # - # > **_Note:_** If the item to remove does not exist, discard() will NOT raise an error. # + thisset = {"apple", "banana", "cherry"} x = thisset.pop() print(x) print(thisset) # - # > **_Note:_** Sets are unordered, so when using the pop() method, you do not know which item that gets removed. # + # The clear() method empties the set: thisset = {"apple", "banana", "cherry"} thisset.clear() print(thisset) # + # The del keyword will delete the set completely: thisset = {"apple", "banana", "cherry"} del thisset # print(thisset) # - # # Loop Items # You can loop through the list items by using a for loop: # + thisset = {"apple", "banana", "cherry"} for x in thisset: print(x) # - # # Join Two Sets # There are several ways to join two or more sets in Python. # # You can use the union() method that returns a new set containing all items from both sets, or the update() method that inserts all the items from one set into another: # # > **_Note:_** Both union() and update() will exclude any duplicate items. # + # The union() method returns a new set with all items from both sets: set1 = {"a", "b" , "c"} set2 = {1, 2, 3} set3 = set1.union(set2) print(set3) # + # The update() method inserts the items in set2 into set1: set1 = {"a", "b" , "c"} set2 = {1, 2, 3} set1.update(set2) print(set1) # - # # Keep ONLY the Duplicates # The intersection_update() method will keep only the items that are present in both sets. # + # Keep the items that exist in both set x, and set y: x = {"apple", "banana", "cherry"} y = {"google", "microsoft", "apple"} x.intersection_update(y) print(x) # + # The intersection() method will return a new set, that only contains the items that are present in both sets. 
x = {"apple", "banana", "cherry"} y = {"google", "microsoft", "apple"} z = x.intersection(y) print(z) # - # # Keep All, But NOT the Duplicates # The symmetric_difference_update() method will keep only the elements that are NOT present in both sets. # + x = {"apple", "banana", "cherry"} y = {"google", "microsoft", "apple"} x.symmetric_difference_update(y) print(x) # + # The symmetric_difference() method will return a new set, that contains only the elements that are NOT present in both sets. x = {"apple", "banana", "cherry"} y = {"google", "microsoft", "apple"} z = x.symmetric_difference(y) print(x,z) # - # # Python Functions # A function is a block of code which only runs when it is called. # # You can pass data, known as parameters, into a function. # # A function can return data as a result. # # # # Creating a Function # In Python a function is defined using the def keyword: def my_function(): print("Hello from a function") # # Calling a Function # To call a function, use the function name followed by parenthesis: # + def my_function(): print("Hello from a function") my_function() # - # # Arguments # Information can be passed into functions as arguments. # # Arguments are specified after the function name, inside the parentheses. You can add as many arguments as you want, just separate them with a comma. # > **_note:_** Arguments are often shortened to args in Python documentations. # # The following example has a function with one argument (fname). When the function is called, we pass along a first name, which is used inside the function to print the full name: # + def my_function(fname): print(fname + " Refsnes") my_function("Emil") my_function("Tobias") my_function("Linus") # - # # Parameters or Arguments? # The terms parameter and argument can be used for the same thing: information that are passed into a function. 
# # # > From a function's perspective:<br> # A parameter is the variable listed inside the parentheses in the function definition.<br> # An argument is the value that is sent to the function when it is called. # # Number of Arguments # By default, a function must be called with the correct number of arguments. Meaning that if your function expects 2 arguments, you have to call the function with 2 arguments, not more, and not less. # + # This function expects 2 arguments, and gets 2 arguments: def my_function(fname, lname): print(fname + " " + lname) my_function("Emil", "Refsnes") # + # If you try to call the function with 1 or 3 arguments, you will get an error: # This function expects 2 arguments, but gets only 1: def my_function(fname, lname): print(fname + " " + lname) #my_function("Emil") # - # # Arbitrary Arguments, *args # If you do not know how many arguments that will be passed into your function, add a * before the parameter name in the function definition. # # > **_Note:_** Arbitrary Arguments are often shortened to *args in Python documentations. # # This way the function will receive a tuple of arguments, and can access the items accordingly: # + # If the number of arguments is unknown, add a * before the parameter name: def my_function(*kids): print("The youngest child is " + kids[2]) my_function("Emil", "Tobias", "Linus") # - # # Keyword Arguments # You can also send arguments with the key = value syntax. # # This way the order of the arguments does not matter. # # > **_Note:_** The phrase Keyword Arguments are often shortened to kwargs in Python documentations. # + def my_function(child3, child2, child1): print("The youngest child is " + child3) my_function(child1 = "Emil", child2 = "Tobias", child3 = "Linus") # - # # Arbitrary Keyword Arguments, **kwargs # If you do not know how many keyword arguments that will be passed into your function, add two asterisk: ** before the parameter name in the function definition. 
# # > **_Note:_** Arbitrary Kword Arguments are often shortened to **kwargs in Python documentations. # # This way the function will receive a dictionary of arguments, and can access the items accordingly: # + # If the number of keyword arguments is unknown, add a double ** before the parameter name: def my_function(**kid): print("His last name is " + kid["lname"]) my_function(fname = "Tobias", lname = "Refsnes") # - # # Default Parameter Value # The following example shows how to use a default parameter value. # # If we call the function without argument, it uses the default value: # + def my_function(country = "Norway"): print("I am from " + country) my_function("Sweden") my_function("India") my_function() my_function("Brazil") # - # # Passing a List as an Argument # You can send any data types of argument to a function (string, number, list, dictionary etc.), and it will be treated as the same data type inside the function. # # E.g. if you send a List as an argument, it will still be a List when it reaches the function: # + def my_function(food): for x in food: print(x) fruits = ["apple", "banana", "cherry"] my_function(fruits) # - # # Return Values # To let a function return a value, use the return statement: # + def my_function(x): return 5 * x print(my_function(3)) print(my_function(5)) print(my_function(9)) # - # # The pass Statement # function definitions cannot be empty, but if you for some reason have a function definition with no content, put in the pass statement to avoid getting an error. def myfunction(): pass
Week03/PythonPart02.ipynb
% --- % jupyter: % jupytext: % text_representation: % extension: .m % format_name: light % format_version: '1.5' % jupytext_version: 1.14.4 % kernelspec: % display_name: Octave % language: octave % name: octave % --- % # LU-factorization % % ## Example from the slides A = [2 -1 0 0; -1 2 -1 0; 0 -1 2 -1; 0 0 -1 2] [L U] = lu(A) b = [1 0 0 1]' y = L\b x = U\y AProof = [0 -2 1; -1 2 -1; -2 -1 1]; P = [0 0 1; 0 1 0; 1 0 0]; [L U P] = lu(AProof) X = P * AProof - L * U; X
Week_01/Slides_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_latest_p36 # language: python # name: conda_pytorch_latest_p36 # --- # !pip install "sagemaker>=2.48.0" "transformers==4.6.1" "datasets[s3]==1.6.2" --upgrade # + import sagemaker.huggingface import sagemaker sess = sagemaker.Session() role = sagemaker.get_execution_role() print(f"IAM role arn used for running training: {role}") print(f"S3 bucket used for storing artifacts: {sess.default_bucket()}") # + import pandas as pd import os from datasets import load_dataset df_article_summary_full = pd.read_parquet('./meta_description.parquet', engine='pyarrow') df_article_summary_full[['original_text','meta_descrption']].to_csv('./total.csv',index=False) total_data = pd.read_csv('./total.csv') x = total_data[-total_data['meta_descrption'].isnull()] x.columns = ['article','summarization'] # use csv file to test x[:1000].to_csv('./train.csv',index=False,encoding='utf-8') x[1000:1200].to_csv('./test.csv',index=False,encoding='utf-8') x[1200:1400].to_csv('./dev.csv',index=False,encoding='utf-8') # + import boto3 prefix='hk01' bucket = sess.default_bucket() boto3.Session().resource("s3").Bucket(bucket).Object( os.path.join(prefix, "train/train.csv") ).upload_file("./train.csv") boto3.Session().resource("s3").Bucket(bucket).Object( os.path.join(prefix, "test/test.csv") ).upload_file("./test.csv") # - training_input_path = f's3://{sess.default_bucket()}/{prefix}/train/train.csv' test_input_path = f's3://{sess.default_bucket()}/{prefix}/test/test.csv' git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.6.1'} # v4.6.1 is referring to the `transformers_version` you use in the estimator. 
# + hyperparameters={'per_device_train_batch_size':1, 'per_device_eval_batch_size': 1, 'model_name_or_path': 'google/mt5-base', 'train_file':'/opt/ml/input/data/train/train.csv', 'validation_file':'/opt/ml/input/data/test/test.csv', 'test_file':'/opt/ml/input/data/test/test.csv', 'do_train': True, 'do_predict': True, 'do_eval': True, 'text_column':'article', 'summary_column':'summarization', 'save_total_limit':3, 'num_train_epochs': 1, 'predict_with_generate': True, 'output_dir': '/opt/ml/model', 'num_train_epochs': 1, 'learning_rate': 5e-5, 'seed': 7, 'fp16': False, 'source_prefix': "summarize: ", 'eval_steps': 1000, } # configuration for running training on smdistributed Data Parallel #distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}} # + from sagemaker.huggingface import HuggingFace # create the Estimator huggingface_estimator = HuggingFace( entry_point='run_summarization.py', # script source_dir='./examples/pytorch/summarization', # relative path to example git_config=git_config, instance_type='ml.p3.16xlarge', # here better to use ml.p3dn.24xlarge if available instance_count=1, volume_size=500, transformers_version='4.6', pytorch_version='1.7', py_version='py36', role=role, base_job_name='mt5', hyperparameters = hyperparameters ) # - huggingface_estimator.fit({'train':training_input_path,'test':test_input_path}) predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge") # + conversation = '''Jeff: Can I train a ๐Ÿค— Transformers model on Amazon SageMaker? Philipp: Sure you can use the new Hugging Face Deep Learning Container. Jeff: ok. Jeff: and how can I get started? Jeff: where can I find documentation? Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face ''' data= {"inputs":conversation} predictor.predict(data)
sagemaker/08_distributed_summarization_bart_t5/train-huggingface.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.7.0 # language: julia # name: julia-1.7 # --- # # MATH50003 Numerical Analysis: Problem Sheet 6 # # This problem sheet explores condition numbers, indefinite integration, # and Euler's method. # # Questions marked with a โ‹† are meant to be completed without using a computer. # Problems are denoted A/B/C to indicate their difficulty. using LinearAlgebra, Plots, Test # ## 1. Two-point boundary value problems # # **Problem 1.1 (C)** Construct a finite-difference approximation to the # forced Helmholtz equation # $$ # \begin{align*} # u(0) &= 0 \\ # u(1) &= 0 \\ # u'' + k^2 u &= {\rm e}^x # \end{align*} # $$ # and find an $n$ such the error is less than $10^{-4}$ when compared # with the true solution for $k=10$: # $$ # u(x) = (-\cos(k x) + {\rm e}^x \cos(k x)^2 + \cot(k) \sin(k x) - {\rm e} \cos(k) \cot(k) \sin(k x) - {\rm e} \sin(k) \sin(k x) + {\rm e}^x \sin(k x)^2)/(1 + k^2) # $$ # + function helm(k, n) x = range(0, 1; length = n) h = step(x) # TODO: Create a SymTridiagonal discretisation end k = 10 u = x -> (-cos(k*x) + exp(x)cos(k*x)^2 + cot(k)sin(k*x) - โ„ฏ*cos(k)cot(k)sin(k*x) - โ„ฏ*sin(k)sin(k*x) + exp(x)sin(k*x)^2)/(1 + k^2) n = 10 # TODO: choose n to get convergence x = range(0, 1; length=n) @test norm(helm(k, n) - u.(x)) โ‰ค 1E-4 # - # **Problem 1.2 (A)** Discretisations can also be used to solve eigenvalue problems. # Consider the Schrรถdinger equation with quadratic oscillator: # $$ # u(-L) = u(L) = 0, -u'' + x^2 u = ฮป u # $$ # (a) Use the finite-difference approximation to discretise this equation as eigenvalues of a # matrix. Hint: write # $$ # \begin{align*} # u(-L) = 0 \\ # -u'' + x^2 u - ฮปu = 0\\ # u(L) = 0 # \end{align*} # $$ # and discretise as before, doing row eliminations to arrive at a symmetric tridiagonal # matrix eigenvalue problem. 
# (b) Approximate the eigenvalues using `eigvals(A)` (which returns the eigenvalues of a # matrix `A`) with $L = 10$. # Can you conjecture their exact value if $L = โˆž$? Hint: they are integers and the eigenvalues # closest to zero are most accurate. # **Problem 1.3โ‹† (A)** Consider Helmholtz with Neumann conditions: # $$ # u'(0) = c_0 \\ # u'(1) = c_1 \\ # u_{xx} + k^2 u = f(x) # $$ # Write down the finite difference approximation approximating # $u(x_k) โ‰ˆ u_k$ on # an evenly spaced grid $x_k = (k-1)/(n-1)$ for $k=1,โ€ฆ,n$ # using the first order derivative approximation conditions: # $$ # \begin{align*} # u'(0) &โ‰ˆ (u_2-u_1)/h = c_0 \\ # u'(1) &โ‰ˆ (u_n-u_{n-1})/h = c_1 # \end{align*} # $$ # Use pivoting to reduce the equation to one involving a # symmetric tridiagonal matrix. # # ##ย 2. Convergence # # **Problem 2.1โ‹† (B)** For the equation # $$ # \begin{align*} # u(0) &= c_0 \\ # u' + au &= f(x) # \end{align*} # $$ # where $a โˆˆ โ„$ and $0 โ‰คย x โ‰คย 1$, # prove convergence as $n โ†’ โˆž$ for the method constructed in PS6 using the approximation # where we take the average of the two grid points: # $$ # {u'(x_{k+1}) + u'(x_k) \over 2} โ‰ˆ {u_{k+1} - u_k \over h}. # $$ # # **Problem 2.2โ‹† (A)** Consider the matrices # $$ # L = \begin{bmatrix} 1 \\ # -a_1 & 1 \\ # & -a_2 & 1\\ # && โ‹ฑ & โ‹ฑ \\ # &&& -a_{n-1} & 1 # \end{bmatrix}, \qquad T = \begin{bmatrix} 1 \\ # -a & 1 \\ # & -a & 1\\ # && โ‹ฑ & โ‹ฑ \\ # &&& -a & 1 # \end{bmatrix}. # $$ # By writing down the inverse explicitly prove that if $|a_k| โ‰ค a$ then # $$ # \|L^{-1}\|_{1 โ†’ โˆž} โ‰คย \|T^{-1}\|_{1 โ†’ โˆž}. # $$ # Use this to prove convergence as $n โ†’ โˆž$ of forward Euler for # $$ # \begin{align*} # u(0) &= c_0 \\ # u'(x) - a(x)u(x) &= f(x) # \end{align*} # $$ # # # # # # ## 3. 
Fourier series # # **Problem 3.1โ‹† (C)** Give explicit formulae for $fฬ‚_k$ and $fฬ‚_k^n$ for the following functions: # $$ # \cos ฮธ, \cos 4ฮธ, \sin^4ฮธ, {3 \over 3 - {\rm e}^{\rm i ฮธ}}, {1 \over 1 - 2{\rm e}^{\rm i ฮธ}} # $$ # Hint: You may wish to try the change of variables $z = {\rm e}^{-{\rm i}ฮธ}$. # # **Problem 3.2โ‹† (B)** Prove that if the first $ฮป-1$ derivatives $f(ฮธ), f'(ฮธ), โ€ฆ, f^{(ฮป-1)}(ฮธ)$ # are 2ฯ€-periodic and $f^{(ฮป)}$ is uniformly bounded that # $$ # |fฬ‚_k| = O(|k|^{-ฮป})\qquad \hbox{as $|k| โ†’ โˆž$} # $$ # Use this to show for the Taylor case ($0 = fฬ‚_{-1} = fฬ‚_{-2} = โ‹ฏ$) that # $$ # |f(ฮธ) - โˆ‘_{k=0}^{n-1} fฬ‚_k {\rm e}^{{\rm i}kฮธ}| = O(n^{1-ฮป}) # $$ # # # **Problem 3.3โ‹† (C)** # If $f$ is a trigonometric polynomial ($fฬ‚_k = 0$ for $|k| > m$) show # for $n โ‰ฅ 2m+1$ we can exactly recover $f$: # $$ # f(ฮธ) = \sum_{k=-m}^m fฬ‚_k^n {\rm e}^{{\rm i} k ฮธ} # $$ # # # # **Problem 3.4โ‹† (B)** For the general (non-Taylor) case and $n = 2m+1$, prove convergence for # $$ # f_{-m:m}(ฮธ) := โˆ‘_{k=-m}^m fฬ‚_k^n {\rm e}^{{\rm i} k ฮธ} # $$ # to $f(ฮธ)$ as $n \rightarrow โˆž$. # What is the rate of convergence if the first $ฮป-1$ derivatives $f(ฮธ), f'(ฮธ), โ€ฆ, f^{(ฮป-1)}(ฮธ)$ # are 2ฯ€-periodic and $f^{(ฮป)}$ is uniformly bounded?
sheets/week7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Parabola and Translating Line # + # --------IMPORTING LIBRARIES-------- import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from IPython.display import HTML # + # --------FORMATTING THE DISPLAY-------- # Setting the size of the display window fig = plt.figure(figsize = (7, 7)) # Setting the axes sizes plt.xlim((-2, 8)) plt.ylim((-4, 12)) # Setting the axes markers plt.xticks([-2, 0, 2, 4, 6, 8]) plt.yticks([-4, -2, 0, 2, 4, 6, 8, 10, 12]) # Plot axes lines plt.axhline(0, color = 'black', lw =1) plt.axvline(0, color = 'black', lw = 1) # Plot grid liens plt.grid() # --------PLOTTING POINTS AND FUNCTIONS-------- # Define and plot parabola def parabola(x): y = x*(6 - x) return y X = np.linspace(-2, 8, 200) plt.plot(X, parabola(X), 'r-', lw = 3) # --------CREATE ANIMATION OBJECTS-------- def line(x): y = 2*x + 8 return y X = np.linspace(-2, 8, 100) line, = plt.plot(X, line(X), 'b-') points, = plt.plot([], [], 'ko', markersize = 7) # + # --------ANIMATION FUNCTION-------- # Updating the y-intercept value (b) # Total animation time T = 10 # Change in x every frame deltaB = 0.1 # Initial x-value b0 = 8 # Final x-value bN = -8 # Number of frames and time intervals calculated from above information N = abs(int(((bN - b0)/deltaB) + 1)) deltaT = T*1000/N # This animate function will create each frame in our stop-motion animation. The parameter i is the frame number. 
def animate(i): b = b0 - (deltaB*i) def f(x): y = 2*x + b return y X = np.linspace(-2, 8, 100) line.set_data(X, f(X)) if 16 - 4*b >= 0: x1 = (4 + np.sqrt(16 - 4*b))/2 x2 = (4 - np.sqrt(16 - 4*b))/2 y1 = -x1**2 + 6*x1 y2 = -x2**2 + 6*x2 points.set_data([x1, x2], [y1, y2]) return line, points anim = animation.FuncAnimation(fig, animate, frames=N, interval = deltaT, repeat = False) HTML(anim.to_html5_video()) # -
OtherProjects/translating-line-animation-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import tensorflow as tf x = tf.Variable(3, name='x') y = tf.Variable(4, name='y') f = x * x * y + y + 2 # + init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) result = sess.run(f) print(result) # + import numpy as np from sklearn.datasets import fetch_california_housing housing = fetch_california_housing() m, n = housing.data.shape housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data] X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X') y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y') XT = tf.transpose(X) theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y) with tf.Session() as sess: theta_value = theta.eval() print(theta_value) # - from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_housing_data = scaler.fit_transform(housing.data) scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data] print(scaled_housing_data_plus_bias.mean(axis=0)) print(scaled_housing_data_plus_bias.mean(axis=1)) print(scaled_housing_data_plus_bias.mean()) print(scaled_housing_data_plus_bias.shape) # + tf.reset_default_graph() n_epochs = 10000 learning_rate = 0.01 X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(mse) mse_summary = tf.summary.scalar('MSE', mse) file_writer = tf.summary.FileWriter('tf_logs/up-and-running') 
init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs+1): if epoch % 100 == 0: print "Epoch", epoch, "MSE =", mse.eval() file_writer.add_summary(mse_summary.eval(), epoch / 100) sess.run(training_op) best_theta = theta.eval() file_writer.close()
TensorflowUp_exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Basic usage of modified transformers library (modification have been done only for BERT) # # ### To run this notebook: Clone this repo and install the modified library. # ### https://github.com/gorokoba560/norm-analysis-of-transformer # ## Install the changed library (only first once) # 1. move into the modified library # > $ cd transformers # # 1. install using pip (I recommend editable mode (-e option).) # > $ pip install -e . # # 1. please install pytorch (https://pytorch.org/) \ # e.g., # > $ pip install torch torchvision # ## Usage # %load_ext autoreload # %autoreload 2 # + import torch import numpy as np import pandas as pd import torch.nn as nn import transformers from sklearn.metrics import * from transformers import AdamW from tqdm.notebook import tqdm from scipy.special import softmax from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split as tts from transformers import BertTokenizerFast, BertTokenizer, BertModel, BertConfig, BertForSequenceClassification, AutoModel from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler device = "cuda" if torch.cuda.is_available() else "cpu" # + # Load pretrained model/tokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") model = BertModel.from_pretrained("bert-base-uncased").to(device) model.eval() # Tokenize text and convert to ids. input_ids = torch.tensor([tokenizer.encode("Here is some text to encode", add_special_tokens=True)]).to(device) print(input_ids) tokenized_text = tokenizer.convert_ids_to_tokens(input_ids[0]) print(tokenized_text) # - # If you want to extract vector norms (i.e., ||f(x)||, ||ฮฑf(x)||, ||ฮฃฮฑf(x)||), please set the argument "output_norms" to True. 
with torch.no_grad(): last_hidden_state, pooler_output, hidden_states, attentions, norms = model(input_ids=input_ids, output_hidden_states=True, output_attentions=True, output_norms=True) # Returned variable "norms" is a tuple which has 12 elements (equal to the number of layers). print(type(norms)) print(len(norms)) # + # Each element is a tuple which consists of 3 elements: ||f(x)||, ||ฮฑf(x)||, and ||ฮฃฮฑf(x)||. layer = 5 print(type(norms[layer-1])) print(len(norms[layer-1])) fx_norm, afx_norm, summed_afx_norm = norms[layer-1] # - # shape of fx_norm (||f(x)||) is (batch, num_heads, seq_length) print("shape of ||f(x)||:", fx_norm.size()) # shape of afx_norm (||ฮฑf(x)||) is (batch, num_heads, seq_length, seq_length) print("shape of ||ฮฑf(x)||:", afx_norm.size()) # shape of summed_afx_norm (||ฮฃฮฑf(x)||) is (batch, seq_length, seq_length) print("shape of ||ฮฃฮฑf(x)||:", summed_afx_norm.size()) # ## Comparison with attention weight # + import seaborn as sns import pandas as pd import numpy as np import matplotlib.pyplot as plt # Set the layer and head you want to check. 
# (layer: 1~12, head: 1~12) — both are 1-based; code below indexes with -1.
layer = 8
head = 5
# -

# ### Attention weight visualization (Head-level visualization)

# Heatmap of the raw attention probabilities for one (layer, head);
# rows = attending tokens, columns = attended-to tokens.
plt.figure()
attention = attentions[layer-1][0][head-1].cpu().numpy()
df = pd.DataFrame(attention,columns=tokenized_text,index=tokenized_text)
sns.heatmap(df,cmap="Reds",square=True)
plt.gcf().subplots_adjust(bottom=0.2)

# ### Norm of the weighted vectors (||ฮฑf(x)||) visualization (Head-level visualization)

# Same layout as above but using the norm-based scores (element [1] of the
# per-layer norms tuple) instead of the attention weights.
plt.figure()
afx_norm = norms[layer-1][1]
norm = afx_norm[0][head-1].cpu().numpy()
df = pd.DataFrame(norm,columns=tokenized_text,index=tokenized_text)
sns.heatmap(df,cmap="Reds",square=True)
plt.gcf().subplots_adjust(bottom=0.2)

# ### Attention weight visualization (Layer-level visualization)

# Layer view: mean over the head axis of the attention tensor.
plt.figure()
attention = attentions[layer-1][0].mean(0).cpu().numpy()
df = pd.DataFrame(attention,columns=tokenized_text,index=tokenized_text)
sns.heatmap(df,cmap="Reds",square=True)
plt.gcf().subplots_adjust(bottom=0.2)

# ### Norm of the summed weighted vectors (||ฮฃ ฮฑf(x)||) visualization (Layer-level visualization)

# Layer view from element [2] of the per-layer norms tuple (already summed over heads).
plt.figure()
summed_afx_norm = norms[layer-1][2]
norm = summed_afx_norm[0].cpu().numpy()
df = pd.DataFrame(norm,columns=tokenized_text,index=tokenized_text)
sns.heatmap(df,cmap="Reds",square=True)
plt.gcf().subplots_adjust(bottom=0.2)

# ## Experiment with S3 code

# ## Prepare the data

# +
# Download humor detection data
# Paper: https://arxiv.org/abs/2004.12765
#data = pd.read_csv("https://raw.githubusercontent.com/Moradnejad/ColBERT-Using-BERT-Sentence-Embedding-for-Humor-Detection/master/Data/dataset.csv")
#print("\nThere are", len(data), "sentences")
# Use the standard text/label columns
# Create labels: 1 --> humorous, 0 --> not humorous
#data["label"] = data["humor"].apply(int)
#data.head()
# -

# # Read GEC dataset
# Expected columns: "sentence" (text) and "type" (erroneous / other) — the
# CSV must live next to the notebook.
data = pd.read_csv("GEC_sentences.csv")
print("\nThere are", len(data), "sentences")
# Assign 1 to the erroneous sentences and zero to the rest
data["text"], data["label"] = data.sentence, data["type"].apply(lambda x: 1 if x=="erroneous" else 0)
del data["sentence"]
print(data.head())

# +
# Use a subset for quick experiments
subset_data = data[:10000]

# Split to train, val and test.
# NOTE(review): `tts` is presumably sklearn's train_test_split imported under
# an alias earlier in the notebook — confirm.  val is sized to match test.
train, test = tts(subset_data[["text", "label"]], random_state=42, test_size=0.1)
train, val = tts(train, random_state=42, test_size=test.shape[0])
# -

# #### Tokenize and encode with BERT tokenizer

# +
bert_tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')

# A sanity check of the tokenizer
encoded_instance = bert_tokenizer.batch_encode_plus([train.iloc[0].text], padding=True)
print(encoded_instance)
# -

print("Original text:", train.iloc[0].text)
print("BERT BPEs:", bert_tokenizer.convert_ids_to_tokens(encoded_instance["input_ids"][0]))

# Set max_len to the maximum length of the training data
max_len = max([len(bert_tokenizer.encode(s)) for s in train.text.to_list()])
print("The maximum sentence length in training based on BERT BPEs is", max_len)

# Tokenize and encode sentences in each set; pad/truncate everything to max_len
x_train = bert_tokenizer.batch_encode_plus(
    train.text.tolist(),
    max_length = max_len,
    padding=True,
    truncation=True
)
x_val = bert_tokenizer.batch_encode_plus(
    val.text.tolist(),
    max_length = max_len,
    padding=True,
    truncation=True
)
x_test = bert_tokenizer.batch_encode_plus(
    test.text.tolist(),
    max_length = max_len,
    padding=True,
    truncation=True
)

# +
# Convert lists to tensors in order to feed them to our PyTorch model
train_seq = torch.tensor(x_train['input_ids'])
train_mask = torch.tensor(x_train['attention_mask'])
train_y = torch.tensor(train.label.tolist())

val_seq = torch.tensor(x_val['input_ids'])
val_mask = torch.tensor(x_val['attention_mask'])
val_y = torch.tensor(val.label.tolist())

test_seq = torch.tensor(x_test['input_ids'])
test_mask = torch.tensor(x_test['attention_mask'])
test_y = torch.tensor(test.label.tolist())

# +
batch_size = 32

# Create a dataloader for each set
# TensorDataset: Creates a PyTorch dataset object to load data from
train_data = TensorDataset(train_seq, train_mask, train_y)
# RandomSampler: specify the sequence of indices/keys used in data loading
train_sampler = RandomSampler(train_data)
# DataLoader: a Python iterable over a dataset
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

val_data = TensorDataset(val_seq, val_mask, val_y)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)

# Test loader uses batch_size=1 so attention can be inspected per sentence later
test_data = TensorDataset(test_seq, test_mask, test_y)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=1)
# -

# ### Build and train the model

# +
# Define which BERT model to use
# We will use BERT base pre-trained on uncased text
model_name = "bert-base-uncased"

# The BertForSequenceClassification class creates a model with BERT and a classifier on top
# The classifier is a linear layer with two outputs (two is the default, if you have more labels change the config)
# It uses the CrossEntropyLoss from PyTorch
# from_pretrained() is used to load pre-trained weights
#N.B This is now the modified version of BERT that can output the norm of the vectors.
# Instantiate the (norm-enabled) BERT classifier; output_attentions=True so the
# forward pass also returns attention weights.
model = BertForSequenceClassification.from_pretrained(model_name, output_attentions=True)
# -


# Training method
def training():
    """Run one epoch over ``train_dataloader`` and return the mean batch loss (float).

    Relies on the module-level ``model``, ``optimizer``, ``train_dataloader``
    and ``device``.
    """
    # Set to train mode
    model.train()
    total_loss = 0.0
    # Iterate through the training batches
    for batch in tqdm(train_dataloader, desc="Iteration"):
        # Push the batch to the computation device
        sent_id, mask, labels = [r.to(device) for r in batch]
        # Clear gradients
        model.zero_grad()
        # Passing labels makes the model also return the cross-entropy loss
        outputs = model(sent_id, attention_mask=mask, labels=labels)
        loss = outputs[0]
        # BUGFIX: accumulate the detached scalar, not the tensor — summing the
        # loss tensors kept every batch's autograd graph alive for the whole
        # epoch, steadily leaking (GPU) memory.
        total_loss += loss.item()
        # Backward pass to calculate the gradients
        loss.backward()
        # Update parameters
        optimizer.step()
    # Mean loss over the epoch
    return total_loss / len(train_dataloader)


# Evaluation method
def evaluate():
    """Evaluate on ``val_dataloader``.

    Returns (mean loss as float, gold labels list, predicted labels list).
    """
    print("\nEvaluating...")
    # Set to eval mode (disables dropout etc.)
    model.eval()
    total_loss = 0.0
    predictions, targets = [], []
    # Iterate through the validation batches
    for batch in val_dataloader:
        sent_id, mask, labels = [t.to(device) for t in batch]
        # Save the gold labels to use them for evaluation
        targets.extend(labels.detach().cpu().numpy())
        # Deactivate autograd — pure inference
        with torch.no_grad():
            outputs = model(sent_id, attention_mask=mask, labels=labels)
            # Detached scalar, for the same reason as in training()
            total_loss += outputs[0].item()
            logits = outputs[1]
            # Softmax over the classes; argmax gives the predicted label
            output_probs = softmax(logits.detach().cpu().numpy(), axis=1)
            predictions.extend(np.argmax(output_probs, axis=1))
    return total_loss / len(val_dataloader), targets, predictions


# +
# Push model to gpu
#model = model.to(device)

# Define the optimizer and the learning rate
optimizer = AdamW(model.parameters(), lr = 2e-5)

# Book-keeping for early stopping and loss curves
best_val_loss = float('inf')
best_epoch = -1
train_losses=[]
val_losses=[]
epochs = 7
# Define the number of epochs to wait for early stopping
patience = 3

# Train the model, keeping the checkpoint with the best validation loss
for epoch in range(epochs):
    print('\n Epoch {:} / {:}'.format(epoch + 1, epochs))
    train_loss = training()
    val_loss, val_targets, val_predictions = evaluate()
    train_losses.append(train_loss)
    val_losses.append(val_loss)
    print("\nTraining Loss:", train_loss)
    print("Validation Loss:", val_loss)
    # Calculate the validation F1 score for the current epoch
    f1 = f1_score(val_targets, val_predictions, average="binary")
    print("F1 score:", round(f1, 3))
    # Save the model with the best validation loss
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_epoch = epoch
        torch.save(model.state_dict(), 'saved_weights.pt')
    # Early stopping
    if ((epoch - best_epoch) >= patience):
        print("No improvement in", patience, "epochs. Stopped training.")
        break
# -

# ## Inference

# Create the model
model_e = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_attentions=True)
# Load pre-trained weights
checkpoint = torch.load("saved_weights.pt", map_location="cpu")
# Add them to the model
model_e.load_state_dict(checkpoint)
#model_e = model_e.to(device)

# +
# Predict for the test set and save the results
model_e.eval()
test_predictions = []
test_targets = []
test_attentions = []
test_inputs = []
for batch in test_dataloader:
    sent_id, mask, labels = [t.to(device) for t in batch]
    # Get gold labels
    test_targets.extend(labels.detach().cpu().numpy())
    # Get input words (the test loader uses batch_size=1)
    test_inputs.append(bert_tokenizer.convert_ids_to_tokens(sent_id.detach().cpu().numpy()[0]))
    with torch.no_grad():
        # Get predictions
        probs, attention, norms = model_e(sent_id, attention_mask=mask, output_norms=True, output_attentions=True)
        # BUGFIX: this previously read the undefined/stale variable `outputs`
        # instead of the freshly unpacked `probs`/`attention` above.
        output_probs = softmax(probs.detach().cpu().numpy(), axis=1)
        # Get the index with the highest probability as the predicted label
        test_predictions.extend(np.argmax(output_probs, axis=1))
        # Get attention weights
        # Attention weights from all layers are returned in a tuple
        # The weights from each layer are in a tensor with shape (batch_size, attention_heads, max_len, max_len)
        test_attentions.append(attention)
# -

# ## Attention and norm analysis

# Get attention heatmaps
import matplotlib
from IPython.core.display import display, HTML


def colorize(words, color_array):
    """Return an HTML string with each word highlighted on a Reds background.

    words       -- sequence of token strings
    color_array -- matching sequence of intensities, expected in [0, 1]
    """
    cmap = matplotlib.cm.Reds
    template = '<span class="barcode"; style="color: black; background-color: {}">{}</span>'
    colored_string = ''
    for word, color in zip(words, color_array):
        # Map the intensity through the colormap and drop the alpha channel
        color = matplotlib.colors.rgb2hex(cmap(color)[:3])
        # BUGFIX: use the complete HTML entity '&nbsp;' (semicolon was missing)
        colored_string += template.format(color, '&nbsp;' + word + '&nbsp;')
    return colored_string


# Sanity-check the shapes of the last batch's attention and norm outputs
print(len(attention))
print(attention[layer-1].shape)
print(len(norms))
print(len(norms[layer-1]))
norms[layer-1][0].shape

# Lack of Subject-Verb Agreement
sentence1 = "She are going to the park."
# Pronoun Disagreement
sentence2 = "All girl brings their own lunch."

# ### Sentence 1

# +
# Encode the first sentence
encoded_sentence1 = bert_tokenizer.batch_encode_plus([sentence1], padding=True)

# Give as input to the model and get the outputs
inputs = torch.tensor(encoded_sentence1["input_ids"]).to(device)
att = torch.tensor(encoded_sentence1["attention_mask"]).to(device)
probs, attentions, norms = model_e(inputs, attention_mask=att, output_attentions=True, output_norms=True)
# -

# Get the predictions (1 = erroneous, 0 = correct)
output_probs = softmax(probs.detach().cpu().numpy(), axis=1)
predictions = (np.argmax(output_probs, axis=1))
print(sentence1, ":", predictions[0])

# ### What does the CLS token attend to? How does it differ between weight based and norm-based analysis?
# Visualize the attention heatmaps for the CLS token
# (weight-based view: raw attention probabilities, all 12 layers / 12 heads)
tokens = bert_tokenizer.convert_ids_to_tokens(inputs.detach().cpu().numpy()[0])
for l in range(12):
    print("\nLayer", l+1)
    # Drop the batch dimension -> (num_heads, seq_len, seq_len)
    attention = np.squeeze(attentions[l].detach().cpu().numpy(), axis=0)
    cls_attentions = []
    for h, head in enumerate(attention):
        print("Head", h+1)
        # Get the attention for the cls token (row 0 = CLS attending to all tokens)
        cls_attentions = head[0]
        display(HTML(colorize(tokens, cls_attentions)))

# Visualize the attention heatmaps for the CLS token
# (norm-based view: element [1] of each per-layer norms tuple)
tokens = bert_tokenizer.convert_ids_to_tokens(inputs.detach().cpu().numpy()[0])
for l in range(12):
    print("\nLayer", l+1)
    attention = np.squeeze(norms[l][1].detach().cpu().numpy(), axis=0)
    cls_attentions = []
    for h, head in enumerate(attention):
        print("Head", h+1)
        # Get the attention for the cls token
        cls_attentions = head[0]
        display(HTML(colorize(tokens, cls_attentions)))

# +
# Encode the second sentence
encoded_sentence2 = bert_tokenizer.batch_encode_plus([sentence2], padding=True)

# Give as input to the model and get the outputs
inputs = torch.tensor(encoded_sentence2["input_ids"]).to(device)
att = torch.tensor(encoded_sentence2["attention_mask"]).to(device)
probs, attentions, norms = model_e(inputs, attention_mask=att, output_attentions=True, output_norms=True)

# Get the predictions
output_probs = softmax(probs.detach().cpu().numpy(), axis=1)
predictions = (np.argmax(output_probs, axis=1))
print(sentence2, ":", predictions[0])
# -

# Visualize the attention heatmaps for the CLS token
# (sentence 2, weight-based view, layer 8 only — range(7,8))
tokens = bert_tokenizer.convert_ids_to_tokens(inputs.detach().cpu().numpy()[0])
for l in range(7,8):
    print("\nLayer", l+1)
    attention = np.squeeze(attentions[l].detach().cpu().numpy(), axis=0)
    cls_attentions = []
    for h, head in enumerate(attention):
        print("Head", h+1)
        # Get the attention for the cls token
        cls_attentions = head[0]
        display(HTML(colorize(tokens, cls_attentions)))

# Visualize the attention heatmaps for the CLS token
# (sentence 2, norm-based view, layer 8 only)
tokens = bert_tokenizer.convert_ids_to_tokens(inputs.detach().cpu().numpy()[0])
for l in range(7,8):
    print("\nLayer", l+1)
    attention = np.squeeze(norms[l][1].detach().cpu().numpy(), axis=0)
    cls_attentions = []
    for h, head in enumerate(attention):
        print("Head", h+1)
        # Get the attention for the cls token
        cls_attentions = head[0]
        display(HTML(colorize(tokens, cls_attentions)))
Modern_NLP_S4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Capstone Project # ![Data Science Workflow](img/ds-workflow.png) # ## Goal of Project # - This is the **Capstone Project** where we put it all together # - Ideally, we would look at a real business/organisation problem and turn it into a **Data Science Problem** # - As this can be hard, we will just assume we have a problem that we need to sovle # - This will be done by either making up a problem or looking at some data that interests you and make up a question. # ## Step 1: Acquire # - Explore problem # - Identify data # - Import data # ### Step 1.a: Define Problem # - This is fictional - but assume you are your customer first # - When trying to define a problem, don't be too ambitious # - Examples: # - A green energy windmill producer need to optimize distribution and need better prediction on production based on weather forecasts # - An online news media is interested in a story with how CO2 per capita around the world has evolved over the years # - Both projects are difficult # - For the windmill we would need data on production, maintenance periods, detailed weather data, just to get started. # - The data for CO2 per capita is available on [World Bank](https://data.worldbank.org/indicator/EN.ATM.CO2E.PC), but creating a visual story is difficult with our current capabilities # - Hence, make a better research problem # - You can start by considering a dataset and get inspiration # - Examples of datasets # - `files/soccer.parquet` # - `files/co2_gdp_per_capita.csv` # - [Kaggle: IMDb movies extensive dataset](https://www.kaggle.com/stefanoleone992/imdb-extensive-dataset) # - See places to find data in Lesson # - Example of Problem # - What is the highest rated movie genre? 
# # #### Data Science: Understanding the Problem # - Get the right question: # - What is the **problem** we try to **solve**? # - This forms the **Data Science problem** # - **Examples** # - Sales figure and call center logs: evaluate a new product # - Sensor data from multiple sensors: detect equipment failure # - Customer data + marketing data: better targeted marketing # - **Assess situation** # - Risks, Benefits, Contingencies, Regulations, Resources, Requirement # - **Define goal** # - What is the **objective**? # - What is the **success criteria**? # - **Conclusion** # - Defining the problem is key to successful Data Science projects # #### Sample project: # An online media wants to write an article on the trend of movies ratings over the time. They want to explore what is the overall trend and are there different trends in different genres. # - They ask you to make some charts showing trends # ### Step 1.b: Import libraries # - Execute the cell below (SHIFT + ENTER) import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # ### Step 1.c: Identify the Data # # #### Great Places to Find Data ([Lesson 05]()) # - [UC Irvine Machine Learning Repository!](https://archive.ics.uci.edu/ml/index.php) # - [KD Nuggets](https://www.kdnuggets.com/datasets/index.html) Datasets for Data Mining, Data Science, and Machine Learning # - [KD Nuggets](https://www.kdnuggets.com/datasets/government-local-public.html) Government, State, City, Local and Public # - [KD Nuggets](https://www.kdnuggets.com/datasets/api-hub-marketplace-platform.html) APIs, Hubs, Marketplaces, and Platforms # - [KD Nuggets](https://www.kdnuggets.com/competitions/index.html) Analytics, Data Science, Data Mining Competitions # - [data.gov](https://www.data.gov) The home of the U.S. 
Governmentโ€™s open data # - [data.gov.uk](https://data.gov.uk) Data published by central government # - [World Health Organization](https://www.who.int/data/gho) Explore a world of health data # - [World Bank](https://data.worldbank.org) source of world data # - [Kaggle](https://www.kaggle.com) is an online community of data scientists and machine learning practitioners. # # #### Sample project # - Example could be files in `files/imdb/` # ### Step 1.d: Import Data # #### Read CSV files *(Lesson 05)* # - Comma-Seperated Values ([Wikipedia]https://en.wikipedia.org/wiki/Comma-separated_values)) # - Learn more about Excel processing [in this YouTube lesson on CSV](https://youtu.be/LEyojSOg4EI) # - [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html): read a comma-separated values (csv) file into **pandas** DataFrame. # ```Python # import pandas as pd # data = pd.read_csv('files/aapl.csv', parse_dates=True, index_col=0) # ``` # # #### Excel files *(Lesson 05)* # - Most videly used [spreadsheet](https://en.wikipedia.org/wiki/Spreadsheet) # - Learn more about Excel processing [in this lecture](https://www.learnpythonwithrune.org/csv-groupby-processing-to-excel-with-charts-using-pandas-python/) # - [`read_excel()`](https://pandas.pydata.org/docs/reference/api/pandas.read_excel.html) Read an Excel file into a pandas DataFrame. # ```Python # data = pd.read_excel('files/aapl.xlsx', index_col='Date') # ``` # # #### Parquet files *(Lesson 05)* # - [Parquet](https://en.wikipedia.org/wiki/Apache_Parquet) is a free open source format # - Compressed format # - [`read_parquet()`](https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html) Load a parquet object from the file path, returning a DataFrame. 
# ```Python # data = pd.read_parquet('files/aapl.parquet') # ``` # # #### Web Scraping *(Lesson 03)* # - Extracting data from websites # - Leagal issues: [wikipedia.org](https://en.wikipedia.org/wiki/Web_scraping#Legal_issues) # - [`read_html()`](https://pandas.pydata.org/docs/reference/api/pandas.read_html.html) Read HTML tables into a list of DataFrame objects. # ```Python # url = "https://en.wikipedia.org/wiki/Wikipedia:Fundraising_statistics" # data = pd.read_html(url) # ``` # # #### Databases *(Lesson 04)* # - [`read_sql()`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) Read SQL query or database table into a DataFrame. # - The [sqlite3](https://docs.python.org/3/library/sqlite3.html) is an interface for SQLite databases. # ```Python # import sqlite3 # import pandas as pd # conn = sqlite3.connect('files/dallas-ois.sqlite') # data = pd.read_sql('SELECT * FROM officers', conn) # ``` # # #### Sample project # - If using `files/imdb/` they are stored as `parquet` # - HINT: Use `pd.read_parquet` # ### Step 1.e: Combine data # - Often you need to combine data # - Often we need to combine data from different sources # # #### pandas DataFrames # - pandas DataFrames can combine data ([pandas cheat sheet](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf)) # - `concat([df1, df2], axis=0)`: [concat](https://pandas.pydata.org/docs/reference/api/pandas.concat.html) Concatenate pandas objects along a particular axis # - `df.join(other.set_index('key'), on='key')`: [join](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.join.html) Join columns of another DataFrame. 
# - `df1.merge(df2, how='inner', on='a')` [merge](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.merge.html) Merge DataFrame or named Series objects with a database-style join # # #### Sample project # - If using `files/imdb` a simple join might do the work # ## Step 2: Prepare # - Explore data # - Visualize ideas # - Cleaning data # ### Step 2.a: Explore data # - [`head()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.head.html) Return the first n rows. # - [`.shape`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.shape.html) Return a tuple representing the dimensionality of the DataFrame. # - [`.dtypes`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.dtypes.html) Return the dtypes in the DataFrame. # - [`info()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.info.html) Print a concise summary of a DataFrame. # - [`describe()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.describe.html) Generate descriptive statistics. # - [`isna()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.isna.html).[`any()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.isna.html) Returns if any element is missing. # ### Step 2.b: Groupby, Counts and Statistics # - Count groups to see the significance across results # ```Python # data.groupby('Gender').count() # ``` # - Return the mean of the values over the requested axis. # ```Python # data.groupby('Gender').mean() # ``` # - Standard Deviation # - **Standard deviation** is a measure of how dispersed (spread) the data is in relation to the mean. # - Low **standard deviation** means data is close to the mean. # - High **standard deviation** means data is spread out. 
# ![Standard deviation](img/std-diagram.png) # ```Python # data.groupby('Gender').std() # ``` # - Box plots # - Box plots is a great way to visualize descriptive statistics # - Notice that Q1: 25%, Q2: 50%, Q3: 75% # # ![Box plots](img/box-plot.png) # # - Make a box plot of the DataFrame columns [plot.box()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.box.html) # # ```Python # data.boxplot() # ``` # ### Step 2.c: Visualize data # #### Simple Plot # ```Python # import pandas as pd # import matplotlib.pyplot as plt # # %matplotlib inline # data = pd.read_csv('files/WorldBank-ATM.CO2E.PC_DS2.csv', index_col=0) # data['USA'].plot() # ``` # - Adding title and labels # - ```title='Tilte'``` adds the title # - ```xlabel='X label'``` adds or changes the X-label # - ```ylabel='X label'``` adds or changes the Y-label # ```Python # data['USA'].plot(title='US CO2 per capita', ylabel='CO2 (metric tons per capita)') # ``` # - Adding ranges # - ```xlim=(min, max)``` or ```xlim=min``` Sets the x-axis range # - ```ylim=(min, max)``` or ```ylim=min``` Sets the y-axis range # ```Python # data['USA'].plot(title='US CO2 per capita', ylabel='CO2 (metric tons per capita)', ylim=0) # ``` # - Comparing data # ```Python # data[['USA', 'WLD']].plot(ylim=0) # ``` # # #### Scatter Plot # - Good to see any connection # ```Python # data = pd.read_csv('files/sample_corr.csv') # data.plot.scatter(x='x', y='y') # ``` # # #### Histogram # - Identifying quality # ```Python # data = pd.read_csv('files/sample_height.csv') # data.plot.hist() # ``` # - Identifying outliers # ```Python # data = pd.read_csv('files/sample_age.csv') # data.plot.hist() # ``` # - Setting bins and figsize # ```Python # data = pd.read_csv('files/WorldBank-ATM.CO2E.PC_DS2.csv', index_col=0) # data['USA'].plot.hist(figsize=(20,6), bins=10) # ``` # # #### Bar Plot # - Normal plot # ```Python # data = pd.read_csv('files/WorldBank-ATM.CO2E.PC_DS2.csv', index_col=0) # data['USA'].plot.bar() # ``` # - Range and 
columns, figsize and label # ```Python # data[['USA', 'DNK']].loc[2000:].plot.bar(figsize=(20,6), ylabel='CO emmission per capita') # ``` # # #### Pie Chart # - Presenting # ```Python # df = pd.Series(data=[3, 5, 7], index=['Data1', 'Data2', 'Data3']) # df.plot.pie() # ``` # - Value counts in Pie Charts # - ```colors=<list of colors>``` # - ```labels=<list of labels>``` # - ```title='<title>'``` # - ```ylabel='<label>'``` # - ```autopct='%1.1f%%'``` sets percentages on chart # ```Python # (data['USA'] < 17.5).value_counts().plot.pie(colors=['r', 'g'], labels=['>= 17.5', '< 17.5'], title='CO2', autopct='%1.1f%%') # ``` # ### Step 2.d: Clean data # - [`dropna()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.dropna.html) Remove missing values. # - [`fillna()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.fillna.html) Fill NA/NaN values using the specified method. # - Example: Fill missing values with mean. # ```Python # data = data.fillna(data.mean()) # ``` # - [`drop_duplicates()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.drop_duplicates.html) Return DataFrame with duplicate rows removed. # - Working with time series # - [`reindex()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.reindex.html) Conform Series/DataFrame to new index with optional filling logic. # - [`interpolate()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.interpolate.html) Fill NaN values using an interpolation method. 
# - Resources # - pandas user guide: [Working with missing data](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html) # ## Step 3: Analyze # - Feature selection # - Model selection # - Analyze data # ### Step 3.a: Split into Train and Test # - Assign dependent features (those predicting) to `X` # - Assign classes (labels/independent features) to `y` # - Divide into training and test sets # ```Python # from sklearn.model_selection import train_test_split # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # ``` # ### Step 3.b: Feature Scaling # - **Feature Scaling** transforms values in the similar range for machine learning algorithms to behave optimal. # - **Feature Scaling** can be a problems for **Machine Learing** algorithms on multiple features spanning in different magnitudes. # - **Feature Scaling** can also make it is easier to compare results # #### Feature Scaling Techniques # - **Normalization** is a special case of **MinMaxScaler** # - **Normalization**: Converts values between 0-1 # ```Python # (values - values.min())/(values.max() - values.min()) # ``` # - **MinMaxScaler**: Between any values # - **Standardization** (**StandardSclaer** from sklearn) # - Mean: 0, StdDev: 1 # ```Python # (values - values.mean())/values.std() # ``` # - Less sensitive to outliers # # #### Normalization # - [`MinMaxScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) Transform features by scaling each feature to a given range. # - `MinMaxScaler().fit(X_train)` is used to create a scaler. 
# - Notice: We only do it on training data # ```Python # from sklearn.preprocessing import MinMaxScaler # norm = MinMaxScaler().fit(X_train) # X_train_norm = norm.transform(X_train) # X_test_norm = norm.transform(X_test) # ``` # # #### Standarization # - [`StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) Standardize features by removing the mean and scaling to unit variance. # ```Python # from sklearn.preprocessing import StandardScaler # scale = StandardScaler().fit(X_train) # X_train_stand = scale.transform(X_train) # X_test_stand = scale.transform(X_test) # ``` # ### Step 3.c: Feature Selection # - **Feature selection** is about selecting attributes that have the greatest impact towards the **problem** you are solving. # # #### Why Feature Selection? # - Higher accuracy # - Simpler models # - Reducing overfitting risk # # #### Feature Selection Techniques # # ##### Filter methods # - Independent of Model # - Based on scores of statistical # - Easy to understand # - Good for early feature removal # - Low computational requirements # # ##### Examples # - [Chi square](https://en.wikipedia.org/wiki/Chi-squared_test) # - [Information gain](https://en.wikipedia.org/wiki/Information_gain_in_decision_trees) # - [Correlation score](https://en.wikipedia.org/wiki/Correlation_coefficient) # - [Correlation Matrix with Heatmap](https://vitalflux.com/correlation-heatmap-with-seaborn-pandas/) # # ##### Wrapper methods # - Compare different subsets of features and run the model on them # - Basically a search problem # # ##### Examples # - [Best-first search](https://en.wikipedia.org/wiki/Best-first_search) # - [Random hill-climbing algorithm](https://en.wikipedia.org/wiki/Hill_climbing) # - [Forward selection](https://en.wikipedia.org/wiki/Stepwise_regression) # - [Backward elimination](https://en.wikipedia.org/wiki/Stepwise_regression) # # See more on 
[wikipedia](https://en.wikipedia.org/wiki/Feature_selection#Subset_selection) # # ##### Embedded methods # - Find features that contribute most to the accuracy of the model while it is created # - Regularization is the most common method - it penalizes higher complexity # # ##### Examples # - [LASSO](https://en.wikipedia.org/wiki/Lasso_(statistics)) # - [Elastic Net](https://en.wikipedia.org/wiki/Elastic_net_regularization) # - [Ridge Regression](https://en.wikipedia.org/wiki/Ridge_regression) # # #### Remove constant and quasi constant features # - [`VarianceThreshold`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.VarianceThreshold.html) Feature selector that removes all low-variance features. # ```Python # from sklearn.feature_selection import VarianceThreshold # sel = VarianceThreshold() # sel.fit_transform(data) # ``` # #### Remove correlated features # - The goal is to find and remove correlated features # - Calcualte correlation matrix (assign it to `corr_matrix`) # - A feature is correlated to any previous features if the following is true # - Notice that we use correlation 0.8 # ```Python # corr_features = [feature for feature in corr_matrix.columns if (corr_matrix[feature].iloc[:corr_matrix.columns.get_loc(feature)] > 0.8).any()] # ``` # ### Step 3.d: Model Selection # - The process of selecting the model among a collection of candidates machine learning models # # #### Problem type # - What kind of problem are you looking into? 
# - **Classification**: *Predict labels on data with predefined classes* # - Supervised Machine Learning # - **Clustering**: *Identify similarieties between objects and group them in clusters* # - Unsupervised Machine Learning # - **Regression**: *Predict continuous values* # - Supervised Machine Learning # - Resource: [Sklearn cheat sheet](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) # # #### Model Selection Techniques # - **Probabilistic Measures**: Scoring by performance and complexity of model. # - **Resampling Methods**: Splitting in sub-train and sub-test datasets and scoring by mean values of repeated runs. # # #### A few models # - [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) Ordinary least squares Linear Regression ([Lesson 08]()). # ```Python # from sklearn.linear_model import LinearRegression # from sklearn.metrics import r2_score # lin = LinearRegression() # lin.fit(X_train, y_train) # y_pred = lin.predict(X_test) # r2_score(y_test, y_pred) # ``` # - [`SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) C-Support Vector Classification ([Lesson 10]()). # ```Python # from sklearn.svm import SVC, LinearSVC # from sklearn.metrics import accuracy_score # svc = LinearSVC() # svc.fit(X_train, y_train) # y_pred = svc.predict(X_test) # accuracy_score(y_test, y_pred) # ``` # - [`KNeighborsClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html) Classifier implementing the k-nearest neighbors vote ([Lesson 10]()). # ```Python # from sklearn.neighbors import KNeighborsClassifier # from sklearn.metrics import accuracy_score # neigh = KNeighborsClassifier() # neigh.fit(X_train.fillna(-1), y_train) # y_pred = neigh.predict(X_test.fillna(-1)) # accuracy_score(y_test, y_pred) # ``` # ### Step 3.e: Analyze Result # This is the main **check-point** of your analysis. 
# - Review the **Problem** and **Data Science problem** you started with. # - The analysis should add value to the **Data Science Problem** # - Sometimes our focus drifts - we need to ensure alignment with original **Problem**. # - Go back to the **Exploration** of the **Problem** - does the result add value to the **Data Science Problem** and the initial **Problem** (which formed the **Data Science Problem**) # - *Example:* As Data Scientist we often find the research itself valuable, but a business is often interested in increasing revenue, customer satisfaction, brand value, or similar business metrics. # - Did we learn anything? # - Does the **Data-Driven Insights** add value? # - *Example:* Does it add value to have evidence for: Wealthy people buy more expensive cars. # - This might add you value to confirm this hypothesis, but does it add any value for car manufacturer? # - Can we make any valuable insights from our analysis? # - Do we need more/better/different data? # - Can we give any Actionable Data Driven Insights? # - It is always easy to want better and more accurate high quality data. # - Do we have the right features? # - Do we need eliminate features? # - Is the data cleaning appropriate? # - Is data quality as expected? # - Do we need to try different models? # - Data Analysis is an iterative process # - Simpler models are more powerful # - Can result be inconclusive? # - Can we still give recommendations? # # #### Quote # > *โ€œIt is a capital mistake to theorize before one has data. Insensibly one begins to twist facts to suit theories, instead of theories to suit facts.โ€* # > - <NAME> # # #### Iterative Research Process # - **Observation/Question**: Starting point (could be iterative) # - **Hypothesis/Claim/Assumption**: Something we believe could be true # - **Test/Data collection**: We need to gether relevant data # - **Analyze/Evidence**: Based on data collection did we get evidence? # - Can our model predict? 
#       (a model is first useful when it can predict)
# - **Conclude**: *Warning!* E.g.: We can conclude a correlation (this does not mean A causes B)
#     - Example: Based on the collected data we can see a correlation between A and B

# ## Step 4: Report
# - Present findings
# - Visualize results
# - Credibility counts

# ### Step 4.a: Present Findings
# - You need to *sell* or *tell* a story with the findings.
# - Who is your **audience**?
#     - Focus on technical level and interest of your audience
#     - Speak their language
#     - Story should make sense to audience
# - Examples
#     - **Team manager**: Might be technical, but often busy and only interested in high-level status and key findings.
#     - **Data engineer/science team**: Technical exploration and similar interest as you
#     - **Business stakeholders**: This might be end-customers or collaboration in other business units.
# - When presenting
#     - **Goal**: Communicate actionable insights to key stakeholders
# - Outline (inspiration):
#     - **TL;DR** (Too-long; Didn't read) - clear and concise summary of the content (often one line) that frames key insights in the context of impact on key business metrics.
#     - Start with your understanding of the business problem
#     - How does it transform into a Data Science Problem
#     - How will you measure impact - what business metrics are indicators of results
#     - What data is available and used
#     - Presenting the hypothesis of the research
#     - A visual presentation of the insights (model/analysis/key findings)
#         - This is where you present the evidence for the insights
#     - How to use insight and create actions
#     - Follow-up and continuous learning increasing value

# ### Step 4.b: Visualize Results
# - Telling a story with the data
# - This is where you convince that the findings/insights are correct
# - The right visualization is important
#     - Example: A correlation matrix might give a Data Engineer insights into how findings were discovered, but confuse business partners.
#
# #### Resources for visualization
# - [Seaborn](https://seaborn.pydata.org) Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
# - [Plotly](https://plotly.com) open-source for analytic apps in Python
# - [Folium](http://python-visualization.github.io/folium/) makes it easy to visualize data that's been manipulated in Python on an interactive leaflet map.

# ### Step 4.c: Credibility Counts
# - This is the checkpoint of whether your research is valid
#     - Are you hiding findings you did not like (not supporting your hypothesis)?
# - Remember it is the long-term relationship that counts
#     - Don't leave out results
# - We learn from data and find hidden patterns, to make data-driven decisions, with a long-term perspective

# ## Step 5: Actions
# - Use insights
# - Measure impact
# - Main goal

# ### Step 5.a: Use Insights
# - How do we follow up on the presented **Insights**?
# - **No one-size-fits-all**: It depends on the **Insights** and **Problem**
# - *Examples:*
#     1. **Problem**: What customers are most likely to cancel subscription?
#         - Say, we have insufficient knowledge of customers, and need to get more, hence we have given recommendations to gather more insights
#         - But you should still try to add value
#     2. **Problem**: Here is our data - find valuable insights!
#         - This is a challenge as there is no given focus
#         - An iterative process involving the customer can leave you with no surprises

# ### Step 5.b: Measure Impact
# - If the customer cannot measure the impact of your work - they do not know what they pay for.
# - If you cannot measure it - you cannot know if hypotheses are correct.
# - A model is first valuable when it can be used to predict with some certainty
# - There should be identified metrics/indicators to evaluate in the report
# - This can evolve - we learn along the way - or we could be wrong.
#     - How long before we expect to see impact on identified business metrics?
#     - What if we do not see the expected impact?
# - Understanding of metrics
#     - The metrics we measure are indicators that our hypothesis is correct
#     - Other aspects can have impact on the result - but you need to identify that
#
# ### Main Goal
# - Your success as a Data Scientist is to create valuable actionable insights
#
# #### A great way to think
# - Any business/organisation can be thought of as a complex system
#     - Nobody understands it perfectly and it evolves organically
# - Data describes some aspect of it
# - It can be thought of as a black-box
# - Any insights you can bring are like a window that sheds light on what happens inside
Data Science With Python/14 - Project - Capstone Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Workflow: (1) generate a stochastic SEAI5R epidemic trajectory with pyross,
# (2) infer the model parameters from the first part of that trajectory,
# (3) forecast the remainder and compare against the full trajectory.

# %%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../../')
# %run setup.py install
os.chdir(owd)

# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import pyross
import time

# # Define model and create sample trajectory

# +
M = 2 # the population has two age groups
N = 5e4 # and this is the total population

# correct params (ground truth used to generate the synthetic data)
beta = 0.02 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of symptomatic infectives
gIh = 1/14 # recovery rate of hospitalised
gIc = 1/14 # recovery rate of ICU patients
alpha = 0.2 # fraction of asymptomatic infectives
fsa = 0.8 # the self-isolation parameter
fh = 0.1 # self-isolation parameter for hospitalised
gE = 1/5 # rate of leaving the exposed class
gA = 1/3 # rate of leaving the activated class
hh = 0.1*np.ones(M) # fraction which goes from Is to hospital
cc = 0.05*np.ones(M) # fraction which goes from hospital to ICU
mm = 0.4*np.ones(M) # mortality from IC
sa = 0 # change in the population, not taken into account by inference at the moment

# set the age structure
fi = np.array([0.25, 0.75]) # fraction of population in each age group
Ni = N*fi

# set the contact structure (contacts between/within the two age groups)
C = np.array([[18., 9.], [3., 12.]])

# set up initial condition
Ia0 = np.array([0, 0]) # each age group has asymptomatic infectives
Is0 = np.array([0, 0]) # and also symptomatic infectives
R0 = np.zeros((M))
E0 = np.array([10, 10])
A0 = np.array([10, 10])
Ih0 = np.zeros((M))
Ic0 = np.zeros((M))
Im0 = np.zeros((M))
S0 = Ni - (Ia0+Is0+R0+E0+A0+Ih0+Ic0+Im0) # susceptibles = everyone else

Tf = 100 # total simulated time span (days)
Nf = Tf+1 # number of saved time points

# constant contact matrix (no intervention)
def contactMatrix(t):
    return C

parameters = {'alpha':alpha,'beta':beta, 'gIa':gIa,'gIs':gIs, 'gIh':gIh,'gIc':gIc, 'gE':gE, 'gA':gA, 'fsa':fsa, 'fh':fh, 'sa':sa, 'hh':hh, 'cc':cc, 'mm':mm}

# use pyross stochastic to generate traj and save
sto_model = pyross.stochastic.SEAI5R(parameters, M, Ni)
data = sto_model.simulate(S0, E0, A0, Ia0, Is0, Ih0, Ic0, Im0, contactMatrix, Tf, Nf)
# NOTE(review): columns appear to be compartments stacked per age group
# (S, E, A, Ia, Is, ... judging by the plot indices below) — confirm with pyross docs.
data_array = data['X']
np.save('sto_traj.npy', data_array)

# +
# plot the stochastic solution (first age group of each compartment)
plt.plot(data_array[:, 0], label='S')
plt.plot(data_array[:, M], label='E')
plt.plot(data_array[:, 2*M], label='A')
plt.plot(data_array[:, 3*M], label='Ia')
plt.plot(data_array[:, 4*M], label='Is')
plt.legend()
plt.show()
# -

# # Infer parameters based on part of the trajectory

# +
# load the data and rescale to intensive variables (fractions of N)
Tf_inference = 30 # truncate to only getting the first few datapoints
Nf_inference = Tf_inference+1
x = np.load('sto_traj.npy').astype('float')
x = (x[:,:8*M]/N)[:Nf_inference]
steps = 101 # number internal integration steps taken

# initialise the estimator
estimator = pyross.inference.SEAI5R(parameters, M, fi, int(N), steps)
det_model = pyross.deterministic.SEAI5R(parameters, M, fi)
x_det = estimator.integrate(x[0], 0, Tf_inference, det_model, contactMatrix)
# -

# compute -log_p for the original (correct) parameters, as a baseline
start_time = time.time()
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, Nf_inference, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)

# +
# initial guesses for the parameters to be inferred
alpha_g = 0.3
beta_g = 0.1
gIa_g = 0.2
gIs_g = 0.2
gE_g = 0.4
gA_g = 0.5
guess = [alpha_g, beta_g, gIa_g, gIs_g, gE_g, gA_g]
params, nit = estimator.inference(guess, x, Tf_inference, Nf_inference, contactMatrix, ftol=1e-6, verbose=True)
print('best estimates: ', params)
print('no. of iterations: ', nit)
# -

# compute log_p for best estimate (should be <= the baseline above)
start_time = time.time()
# NOTE: `parameters` is rebound here to the inferred values; the ground-truth
# dict defined earlier is no longer accessible after this point.
parameters = estimator.make_params_dict(params)
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, Nf_inference, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)

# covariance of the inferred parameters from the inverse Hessian of -log p
hessian = estimator.hessian(params,x,Tf_inference,Nf_inference,contactMatrix)
cov = np.linalg.inv(hessian)
#cov

# # Compare forecast based on inferred parameters to full trajectory

# +
# inferred values for the fitted parameters; known values for the rest
parameters = {'alpha': params[0], 'beta': params[1], 'gIa': params[2], 'gIs': params[3], 'gE':params[4],'gA':params[5], 'fsa':fsa, 'fh':fh, 'gIh':gIh,'gIc':gIc, 'sa':sa, 'hh':hh, 'cc':cc, 'mm':mm, 'cov':cov}

# Initialise pyross forecast module
model_forecast = pyross.forecast.SEAI5R(parameters, M, Ni)

# +
# Initial condition for forecast is last configuration from inference-trajectory
S0_forecast = x[-1,:M]*N
E0_forecast = x[-1,M:2*M]*N
A0_forecast = x[-1,2*M:3*M]*N
Ia0_forecast = x[-1,3*M:4*M]*N
Is0_forecast = x[-1,4*M:5*M]*N
Ih0_forecast = x[-1,5*M:6*M]*N
Ic0_forecast = x[-1,6*M:7*M]*N
Im0_forecast = x[-1,7*M: ]*N

# Number of simulations over which we average
Ns = 500

Tf_forecast = Tf - Tf_inference
Nf_forecast = Tf_forecast+1

result_forecast = model_forecast.simulate(S0_forecast, E0_forecast, A0_forecast,Ia0_forecast, Is0_forecast, Ih0_forecast,Ic0_forecast, Im0_forecast, contactMatrix, Tf_forecast, Nf_forecast, verbose=True, Ns=Ns)

trajectories_forecast = result_forecast['X']
t_forecast = result_forecast['t'] + Tf_inference

# +
# Plot sum of A, Ia, Is populations
fontsize=25
#
ylabel=r'Fraction of infectives'
#
# Plot total number of symptomatic infectives
cur_trajectories_forecast = np.sum( trajectories_forecast[:,2*M:5*M,:] , axis = 1)
cur_mean_forecast = np.mean( cur_trajectories_forecast, axis=0)
percentile = 10
percentiles_lower = np.percentile(cur_trajectories_forecast,percentile,axis=0)
percentiles_upper = np.percentile(cur_trajectories_forecast,100-percentile,axis=0)
percentiles_median = np.percentile(cur_trajectories_forecast,50,axis=0)
cur_trajectory_underlying = np.sum( data_array[:,2*M:5*M] ,axis=1 )
#
# Plot trajectories
#
fig, ax = plt.subplots(1,1,figsize=(7,5))
ax.axvspan(0, Tf_inference, label='Range used for inference', alpha=0.3, color='dodgerblue')
ax.set_title(r'Forecast with inferred parameters', y=1.05, fontsize=fontsize)
# individual stochastic forecast realisations (faint lines)
for i,e in enumerate(cur_trajectories_forecast):
    ax.plot(t_forecast,e/N, alpha=0.15, )
ax.plot(cur_trajectory_underlying/N, lw=3, color='limegreen', label='Trajectory used for inference')
ax.plot(t_forecast,percentiles_median/N, alpha=1,ls='--', color='orange',label='Median', lw=3)
'''
# remove comments to plot percentiles
ax.plot(t_forecast,percentiles_lower/N, alpha=1, lw=2, label='Percentiles', ls='dotted', color='red', )
ax.plot(t_forecast,percentiles_upper/N, alpha=1, lw=2, color='red', ls='dotted', )
''';
ax.set_xlim(0,np.max(t_forecast))
ax.set_ylabel(ylabel,fontsize=fontsize)
ax.set_xlabel(r'$t$ [days]',fontsize=fontsize)
ax.legend(loc='upper right',bbox_to_anchor=(1.6,1), fontsize=18)
plt.show(fig)
#fig.savefig('inference_forecast.png',dpi=100,bbox_inches='tight')
plt.close(fig)
# -
examples/forecast/ex07 - SEAI5R - inference + forecast.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Scrape the 7-day forecast for downtown Los Angeles from the National
# Weather Service and load it into a pandas DataFrame.
import requests
import bs4
from bs4 import BeautifulSoup

## Downloading weather data
page = requests.get("https://forecast.weather.gov/MapClick.php?lat=34.03707000000003&lon=-118.26264499999996#.YI534y9h02I")
soup = BeautifulSoup(page.content,"html.parser")
seven_day = soup.find(id ="seven-day-forecast")
forecast_class = seven_day.find_all(class_= "tombstone-container")
tonight = forecast_class[0]  # first tombstone = the current forecast period
print(tonight.prettify())

# +
## Extracting information from the page (first period only)
period = tonight.find(class_="period-name").get_text()
short_desc = tonight.find(class_="short-desc").get_text()
temp = tonight.find(class_="temp").get_text()
print(period)
print(short_desc)
print(temp)
# -

# The full-sentence description lives in the image's "title" attribute.
img = tonight.find("img")
tonight_title = img['title']
print(tonight_title)

## Extracting all the information from the page
# NOTE: all four selectors must target the same ".tombstone-container"
# items so the lists line up row-for-row; the previous ".forecast-tombstone"
# class does not exist on the page, which left two of the lists empty and
# made the DataFrame constructor fail on mismatched column lengths.
all_period_names = seven_day.select(".tombstone-container .period-name")
all_period_name =[i.get_text() for i in all_period_names]
all_period_name

all_period_descs = seven_day.select(".tombstone-container .short-desc")
all_period_desc = [i.get_text() for i in all_period_descs]
all_period_desc

all_period_temps = seven_day.select(".tombstone-container .temp")
all_period_temp =[i.get_text() for i in all_period_temps]
all_period_temp

all_period_titles = seven_day.select(".tombstone-container img")
all_period_title = [i["title"] for i in all_period_titles]
all_period_title

# +
## Combining our data into a Pandas Dataframe
import pandas

weather = pandas.DataFrame({
    "period": all_period_name,
    "short_desc": all_period_desc,
    # long description from the img title; previously this column wrongly
    # duplicated short_desc and the extracted titles went unused
    "desc": all_period_title,
    "temp": all_period_temp,
})
weather
# -

# Pull the numeric temperature out of strings like "Low: 52 F".
# A regex is more robust than the old fixed slice str[-5:-2], which breaks
# when the number has a different digit count.
temp_numbers = weather["temp"].str.extract(r"(\d+)", expand=False)
weather["temp_numbers"] = temp_numbers.astype('int')
temp_numbers

weather["temp_numbers"].mean()

# "Low" in the temp text marks night-time periods.
is_night = weather["temp"].str.contains("Low")
weather["is_night"]= is_night
is_night

weather[is_night]
Web Scraping Los Angeles Weather .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Motivation: deadly COVID-19 outcomes
#
#
# ## 1. Custom Decision Tree induction algorithm
#
# Here is a copy of our Decision Tree implementation.

# +
class DecisionNode:
    """A node of the decision tree.

    Internal nodes store the split (col, value) and two child branches;
    leaf nodes store `results`, a dict of class label -> count.
    """
    def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
        self.col = col # attribute on which to split
        self.value = value # value on which to split
        self.results = results # If the node has no children - we store here class labels with their counts
        self.tb = tb # True branch
        self.fb = fb # False branch


def split(rows, column, value):
    """Partition `rows` on `column`: numeric values test >= `value`, otherwise ==.

    Returns (set1, set2) where set1 satisfies the test and set2 does not.
    """
    # define split function according to the value type
    split_function = None
    if isinstance(value, int) or isinstance(value, float):
        split_function = lambda row: row[column] >= value
    else:
        split_function = lambda row: row[column] == value

    # Divide the rows into two sets and return them
    set1 = [row for row in rows if split_function(row)]
    set2 = [row for row in rows if not split_function(row)]
    return (set1, set2)


def count_labels(rows):
    """Count occurrences of each class label (dict: label -> count)."""
    label_count = {}
    for row in rows:
        # The class label is in the last column
        label = row[- 1]
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    return label_count


from math import log

def gini_impurity(rows):
    """Gini impurity of the class labels in `rows`: 1 - sum(p_i^2)."""
    total = len(rows)
    counts = count_labels(rows)
    gini = 0
    for key, val in counts.items():
        p = val / total
        gini += p*p
    return (1 - gini)


def entropy(rows):
    """Shannon entropy (bits) of the class labels in `rows`."""
    total = len(rows)
    counts = count_labels(rows)
    ent = 0.0
    for key,val in counts.items():
        p = val / total
        ent = ent - p * log(p, 2)
    return ent


def variance(rows):
    """Variance of a numeric target (last column) - for regression trees."""
    if len(rows) == 0: return 0
    num_label = [float(row[- 1]) for row in rows]
    mean = sum(num_label) / len(num_label)
    variance = sum([(d - mean) ** 2 for d in num_label]) / len(num_label)
    return variance


def buildtree(rows, score_func=entropy, min_improvement=0, min_samples=0, max_depth=None, depth=0):
    """Recursively build a decision tree over `rows` (last column = class).

    score_func: impurity measure to minimise (entropy, gini_impurity, variance).
    min_improvement: minimum impurity decrease required to accept a split.
    min_samples: both children must contain strictly more than this many rows.
    max_depth: stop splitting at this depth (None = unlimited); `depth` is internal.
    Returns the root DecisionNode.
    """
    if len(rows) == 0: return DecisionNode()

    # Compute overall score for the entire rows dataset
    current_score = score_func(rows)

    # Set up accumulator variables to track the best split criteria
    best_score = current_score
    best_criteria = None
    best_sets = None

    # Total number of features - except the last column where we store the class (target)
    column_count = len(rows[0]) - 1

    for col in range(0, column_count):
        # Generate the list of unique values in
        # this column to split on them
        column_values = set()
        for row in rows:
            column_values.add(row[col])

        # Now try splitting the rows
        # on each unique value in this column
        for value in column_values:
            (set1, set2) = split(rows, col, value)

            # Evaluate the quality of the split
            # p is the proportion of subset set1
            p = float(len(set1)) / len(rows)
            split_score = p * score_func(set1) + (1-p) * score_func(set2)
            if split_score < best_score and \
                (len(set1) > min_samples and len(set2) > min_samples) and \
                (current_score - split_score) > min_improvement:
                best_score = split_score
                best_criteria = (col, value)
                best_sets = (set1, set2)

    # Create the sub branches
    # (if no qualifying split was found, best_score == current_score and we
    # fall through to the leaf case below)
    if (current_score - best_score) > min_improvement and \
        (max_depth is None or depth < max_depth) :
        # print("Splitting on",best_criteria, " 2 sets:", len(best_sets[0]),len(best_sets[1]))
        true_branch = buildtree(best_sets[0], score_func, min_improvement, min_samples, max_depth, depth+1)
        false_branch = buildtree(best_sets[1], score_func, min_improvement, min_samples, max_depth, depth+1)
        return DecisionNode(col=best_criteria[0], value=best_criteria[1], tb=true_branch, fb=false_branch)
    else:
        # Done splitting - summarize class labels in leaf nodes
        return DecisionNode(results=count_labels(rows))


def prediction(leaf_labels):
    """Convert a leaf's label counts into percentage strings (label -> 'NN%')."""
    total = 0
    result = {}
    for label, count in leaf_labels.items():
        total += count
        result[label] = count
    for label, val in result.items():
        result[label] = str(int(result[label]/total * 100))+"%"
    return result


def print_tree(tree, current_branch, attributes=None, indent='', leaf_funct=prediction):
    """Pretty-print `tree`; `attributes` maps column indices to names.

    `current_branch` is the 'T->'/'F->' prefix of the edge leading here;
    `leaf_funct` formats a leaf's label counts for display.
    """
    # Is this a leaf node?
    if tree.results != None:
        print(indent + current_branch + str(leaf_funct(tree.results)))
    else:
        # Print the split question
        split_col = str(tree.col)
        if attributes is not None:
            split_col = attributes[tree.col]
        split_val = str(tree.value)
        if type(tree.value) == int or type(tree.value) == float:
            split_val = ">=" + str(tree.value)
        print(indent + current_branch + split_col + ': ' + split_val + '? ')

        # Print the branches
        indent = indent + ' '
        print_tree(tree.tb, 'T->', attributes, indent)
        print_tree(tree.fb, 'F->', attributes, indent)
# -

# ## 2. Coronavirus risk factors
#
# As discussed in the lecture, decision trees can be used not only for classification/prediction, but also to find out which attributes are most important in classifying the record into a specific class. In this part we want to find out which symptoms/chronic conditions contribute most to the deadly outcome from catching COVID-19.
# This Mexican dataset which contains the information from the Statistical Yearbooks of Morbidity 2015-2017 (as well as the information regarding cases associated with COVID-19) was found on [kaggle](https://www.kaggle.com/tanmoyx/covid19-patient-precondition-dataset).
#
# Download the preprocessed dataset which contains only data about patients who tested positive for COVID-19 and with symptom attributes converted to categorical: [link](https://drive.google.com/file/d/1uVd09ekR1ArLrA8qN-Xtu4l-FFbmetVy/view?usp=sharing).
#
# In this dataset we have the following attributes:
# 1. sex: 1 -woman, 2-man
# 2. age: numeric
# 3. diabetes: yes/no
# 4. copd (chronic obstructive pulmonary disease): yes/no
# 5. asthma: yes/no
# 6. imm_supr (suppressed immune system): yes/no
# 7. hypertension: yes/no
# 8. cardiovascular: yes/no
# 9. renal_chronic: yes/no
# 10. tobacco: yes/no
# 11. outcome: alive/dead

data_file = "../data_sets/covid_categorical_good.csv"

import pandas as pd
data = pd.read_csv(data_file)
# drop rows with any missing value - buildtree expects complete rows
data = data.dropna(how="any")
data.columns

# convert the DataFrame to the list-of-lists format used by buildtree
data_rows = data.to_numpy().tolist()
len(data_rows)

columns_list = data.columns.to_numpy().tolist()
print(columns_list)

# Build decision tree using our custom algorithm:
tree = buildtree(data_rows, score_func=entropy, min_improvement=0, min_samples=30, max_depth=7)
print_tree(tree, '', columns_list)

# What are the most important comorbidity factors? Hard to tell.
# We will try to discover them more efficiently in this project using classification rules.

# Copyright &copy; 2022 <NAME>. All rights reserved.
rules_motivation.ipynb
/ -*- coding: utf-8 -*-
/ ---
/ jupyter:
/   jupytext:
/     text_representation:
/       extension: .q
/       format_name: light
/       format_version: '1.5'
/     jupytext_version: 1.14.4
/   kernelspec:
/     display_name: xsqlite
/     language: sqlite
/     name: xsqlite
/ ---

/ # Daily sales
/
/ A query that computes per-date sales totals from the purchase history.
/ (Heading translated to English; the original Japanese text was mojibake in this copy.)

/ + pycharm={"name": "#%%\n"}
%LOAD example.db

/ + pycharm={"name": "#%%\n"}
WITH purchases AS (
    -- Inline sample data: one row per purchase (timestamp, amount).
    SELECT DATETIME("2021-09-01 12:34:56") AS timestamp, 500 AS amount
    UNION ALL
    SELECT DATETIME("2021-09-02 01:12:23") AS timestamp, 150 AS amount
    UNION ALL
    SELECT DATETIME("2021-09-02 12:23:34") AS timestamp, 200 AS amount
    UNION ALL
    SELECT DATETIME("2021-09-04 11:11:11") AS timestamp, 700 AS amount
)
-- Truncate each timestamp to its calendar date and total the amounts per date.
SELECT
    DATE(timestamp) AS date,
    SUM(amount) AS amount
FROM purchases
GROUP BY date
ORDER BY date
notebooks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # Within-subject SVM classification based on beta weights (per trials) averaged within networks from different grains of MIST parcellation, for CIMAQ memory encoding task (fMRI data). # Mean network betas reflect the engagement of a particular network for each trial. # MIST Parcellations include: 7, 12, 20, 36, 64, 122, 197, 325, 444 networks # # Trials (conditions) are classifierd according to: # - task condition (encoding or control task) # - memory performance (hit vs miss, correct vs incorrect source) # - stimulus category (?) # # Each model is ran and tested on data from the same subject, and then group statistics (confidence intervals) are computed around accuracy scores from each individual participant. # + import os import sys import glob import numpy as np import pandas as pd import nilearn import scipy import nibabel as nb import sklearn import seaborn as sns import itertools from numpy import nan as NaN from matplotlib import pyplot as plt from nilearn import image, plotting from nilearn import masking from nilearn import plotting from nilearn import datasets from nilearn.plotting import plot_stat_map, plot_roi, plot_anat, plot_img, show from nilearn.input_data import NiftiMasker, NiftiLabelsMasker from sklearn.model_selection import train_test_split from sklearn.svm import SVC, LinearSVC from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score, f1_score from sklearn.model_selection import cross_val_predict, cross_val_score from sklearn.preprocessing import MinMaxScaler # - # Step 1: import list of participants, and generate sublists of participants who have enough trials per category for each classification. 
# # Encoding vs Control task conditions (all 94) # Stimulus category (all 94) # Hit versus Miss (42 participants; at least 15 trials per condition) # Correct Source versus Wrong Source (49 participants; at least 15 trials per condition) # Correct Source versus Miss (38 participants; at least 15 trials per condition) # *NOTE: ADD filter to exclude participants with too many scrubbed frames?? * # + # Path to directory with participant lists data_file = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Participants/Splitting/Sub_list.tsv' sub_data = pd.read_csv(data_file, sep = '\t') # Exclude participants who failed QC sub_data = sub_data[sub_data['QC_status']!= 'F'] ## ADD filter to exclude participants with too many scrubbed frames?? ## # Set minimal number of trials needed per subject to include them in analysis num = 14 # Encoding vs Control, and Stimulus Category classifications all_subs = sub_data['participant_id'] all_diagnosis = sub_data['cognitive_status'] print(all_subs) print(len(all_subs)) # Hit versus Miss hm_data = sub_data[sub_data['hits'] > num] hm_data = hm_data[hm_data['miss'] > num] hm_subs = hm_data['participant_id'] hm_diagnosis = hm_data['cognitive_status'] print(hm_subs) print(len(hm_subs)) # Correct Source versus Wrong Source cw_data = sub_data[sub_data['correct_source'] > num] cw_data = cw_data[cw_data['wrong_source'] > num] cw_subs = cw_data['participant_id'] cw_diagnosis = cw_data['cognitive_status'] print(cw_subs) print(len(cw_subs)) # Correct Source versus Miss cmiss_data = sub_data[sub_data['correct_source'] > num] cmiss_data = cmiss_data[cmiss_data['miss'] > num] cmiss_subs = cmiss_data['participant_id'] cmiss_diagnosis = cmiss_data['cognitive_status'] print(cmiss_subs) print(len(cmiss_subs)) # - # Step 2. 
Set up paths of directories of interest # # Create empty data structures to save and export classification results # # set paths to directories of interest beta_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Betas' label_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Events' mask_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/masks' output_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Nilearn/Group_results' # Step 3. ENCODING VERSUS CONTROL TASK CLASSIFICATION # # Build and test model for each participant on list, and compile data in a single pandas dataframe # + # ENCODING VERSUS CONTROL TASK CLASSIFICATION # build data structure to store accuracy data and coefficients enc_ctl_data = pd.DataFrame() enc_ctl_data.insert(loc = 0, column = 'dccid', value = 'None', allow_duplicates=True) # enc_ctl_data.insert(loc = 1, column = 'diagnosis', value = 'None', allow_duplicates=True) for i in range(0, 10): enc_ctl_data.insert(loc = enc_ctl_data.shape[1], column = 'CV'+str(i+1)+'_acc', value = NaN, allow_duplicates=True) enc_ctl_data.insert(loc = enc_ctl_data.shape[1], column = 'TrainSet_MeanCV_acc', value = 'None', allow_duplicates=True) enc_ctl_data.insert(loc = enc_ctl_data.shape[1], column = 'TestSet_acc', value = 'None', allow_duplicates=True) for sub in all_subs: print(sub) s_data = [sub] # load subject's beta maps (one per trial) betas = image.load_img(img=os.path.join(beta_dir, str(sub), 'TrialContrasts/betas_sub'+str(sub)+'*.nii'), wildcards=True) # initialize NiftiMasker object sub_mask = nb.load(os.path.join(mask_dir, 'func_sub'+str(sub)+'_mask_stereonl.nii')) sub_masker = NiftiMasker(mask_img=sub_mask, standardize=True) # transform subject's beta maps into vector of network means per trial X_enc_ctl = sub_masker.fit_transform(betas) # load subject's trial labels labels_file = os.path.join(label_dir, 'sub-'+str(sub)+'_enco_ctl.tsv') enco_ctl_labels = pd.read_csv(labels_file, sep='\t') y_enco_ctl = enco_ctl_labels['condition'] # mask data to 
exclude trials of no interest # does not apply here # Split trials into a training and a test set X_train, X_test, y_train, y_test = train_test_split( X_enc_ctl, # x y_enco_ctl, # y test_size = 0.4, # 60%/40% split shuffle = True, # shuffle dataset before splitting stratify = y_enco_ctl, # keep distribution of conditions consistent betw. train & test sets #random_state = 123 # if set number, same shuffle each time, otherwise randomization algo ) print('training:', len(X_train), 'testing:', len(X_test)) print(y_train.value_counts(), y_test.value_counts()) # define the model sub_svc = SVC(kernel='linear', class_weight='balanced') # do cross-validation to evaluate model performance # within 10 folds of training set # predict y_pred = cross_val_predict(sub_svc, X_train, y_train, groups=y_train, cv=10) # scores cv_acc = cross_val_score(sub_svc, X_train, y_train, groups=y_train, cv=10) print(cv_acc) for i in range(0, len(cv_acc)): s_data.append(cv_acc[i]) # evaluate overall model performance on training data overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) s_data.append(overall_acc) # Test model on unseen data from the test set sub_svc.fit(X_train, y_train) y_pred = sub_svc.predict(X_test) # classify age class using testing data acc = sub_svc.score(X_test, y_test) # get accuracy cr = classification_report(y_pred=y_pred, y_true=y_test) # get prec., recall & f1 # print results print('accuracy =', acc) print(cr) s_data.append(acc) # get map of coefficients # coef_ = sub_svc.coef_ # print(coef_.shape) #Return voxel weights into a nifti image using the NiftiMasker # coef_img = sub_masker.inverse_transform(coef_) #Save .nii to file # coef_img.to_filename(os.path.join(output_dir, 'Coef_maps', 'SVC_coeff_enc_ctl_sub-'+str(sub)+'.nii')) enc_ctl_data = enc_ctl_data.append(pd.Series(s_data, index=enc_ctl_data.columns), ignore_index=True) demo_data = 
sub_data.copy() demo_data.reset_index(level=None, drop=False, inplace=True) enc_ctl_data.insert(loc = 1, column = 'cognitive_status', value = demo_data['cognitive_status'], allow_duplicates=True) enc_ctl_data.insert(loc = 2, column = 'total_scrubbed_frames', value = demo_data['total_scrubbed_frames'], allow_duplicates=True) enc_ctl_data.insert(loc = 3, column = 'mean_FD', value = demo_data['mean_FD'], allow_duplicates=True) enc_ctl_data.insert(loc = 4, column = 'hits', value = demo_data['hits'], allow_duplicates=True) enc_ctl_data.insert(loc = 5, column = 'miss', value = demo_data['miss'], allow_duplicates=True) enc_ctl_data.insert(loc = 6, column = 'correct_source', value = demo_data['correct_source'], allow_duplicates=True) enc_ctl_data.insert(loc = 7, column = 'wrong_source', value = demo_data['wrong_source'], allow_duplicates=True) enc_ctl_data.insert(loc = 8, column = 'dprime', value = demo_data['dprime'], allow_duplicates=True) enc_ctl_data.insert(loc = 9, column = 'associative_memScore', value = demo_data['associative_memScore'], allow_duplicates=True) enc_ctl_data.to_csv(os.path.join(output_dir, 'SVC_withinSub_enc_ctl_wholeBrain.tsv'), sep='\t', header=True, index=False) # + # HIT VERSUS MISS TRIAL CLASSIFICATION # build data structure to store accuracy data and coefficients hit_miss_data = pd.DataFrame() hit_miss_data.insert(loc = 0, column = 'dccid', value = 'None', allow_duplicates=True) # hit_miss_data.insert(loc = 1, column = 'diagnosis', value = 'None', allow_duplicates=True) for i in range(0, 7): hit_miss_data.insert(loc = hit_miss_data.shape[1], column = 'CV'+str(i+1)+'_acc', value = NaN, allow_duplicates=True) hit_miss_data.insert(loc = hit_miss_data.shape[1], column = 'TrainSet_MeanCV_acc', value = 'None', allow_duplicates=True) hit_miss_data.insert(loc = hit_miss_data.shape[1], column = 'TestSet_acc', value = 'None', allow_duplicates=True) for sub in hm_subs: print(sub) s_data = [sub] # load subject's beta maps (one per trial) betas = 
image.load_img(img=os.path.join(beta_dir, str(sub), 'TrialContrasts/betas_sub'+str(sub)+'*.nii'), wildcards=True) # initialize NiftiLabelMasker object sub_mask = nb.load(os.path.join(mask_dir, 'func_sub'+str(sub)+'_mask_stereonl.nii')) sub_masker = NiftiMasker(mask_img=sub_mask, standardize=True) # transform subject's beta maps into vector of network means per trial X_hit_miss_ctl = sub_masker.fit_transform(betas) # load subject's trial labels labels_file = os.path.join(label_dir, 'sub-'+str(sub)+'_ctl_miss_hit.tsv') y_hit_miss_ctl = pd.read_csv(labels_file, sep='\t') y_hit_miss_ctl_labels = y_hit_miss_ctl['ctl_miss_hit'] # mask X and y data to exclude trials of no interest hit_miss_mask = y_hit_miss_ctl_labels.isin(['hit', 'missed']) y_hit_miss = y_hit_miss_ctl_labels[hit_miss_mask] X_hit_miss = X_hit_miss_ctl[hit_miss_mask] # Split trials into a training and a test set X_train, X_test, y_train, y_test = train_test_split( X_hit_miss, # x y_hit_miss, # y test_size = 0.4, # 60%/40% split shuffle = True, # shuffle dataset before splitting stratify = y_hit_miss, # keep distribution of conditions consistent betw. 
train & test sets #random_state = 123 # if set number, same shuffle each time, otherwise randomization algo ) print('training:', len(X_train), 'testing:', len(X_test)) print(y_train.value_counts(), y_test.value_counts()) # define the model sub_svc = SVC(kernel='linear', class_weight='balanced') # do cross-validation to evaluate model performance # within 10 folds of training set # predict y_pred = cross_val_predict(sub_svc, X_train, y_train, groups=y_train, cv=7) # scores cv_acc = cross_val_score(sub_svc, X_train, y_train, groups=y_train, cv=7) print(cv_acc) for i in range(0, len(cv_acc)): s_data.append(cv_acc[i]) # evaluate overall model performance on training data overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) s_data.append(overall_acc) # Test model on unseen data from the test set sub_svc.fit(X_train, y_train) y_pred = sub_svc.predict(X_test) # classify age class using testing data acc = sub_svc.score(X_test, y_test) # get accuracy cr = classification_report(y_pred=y_pred, y_true=y_test) # get prec., recall & f1 # print results print('accuracy =', acc) print(cr) s_data.append(acc) # get map of coefficients # coef_ = sub_svc.coef_ # print(coef_.shape) #Return voxel weights into a nifti image using the NiftiMasker # coef_img = sub_masker.inverse_transform(coef_) #Save .nii to file # coef_img.to_filename(os.path.join(output_dir, 'Coef_maps', 'SVC_coeff_hit_miss_sub-'+str(sub)+'.nii')) hit_miss_data = hit_miss_data.append(pd.Series(s_data, index=hit_miss_data.columns), ignore_index=True) demo_data = hm_data.copy() demo_data.reset_index(level=None, drop=False, inplace=True) hit_miss_data.insert(loc = 1, column = 'cognitive_status', value = demo_data['cognitive_status'], allow_duplicates=True) hit_miss_data.insert(loc = 2, column = 'total_scrubbed_frames', value = demo_data['total_scrubbed_frames'], allow_duplicates=True) 
hit_miss_data.insert(loc = 3, column = 'mean_FD', value = demo_data['mean_FD'], allow_duplicates=True) hit_miss_data.insert(loc = 4, column = 'hits', value = demo_data['hits'], allow_duplicates=True) hit_miss_data.insert(loc = 5, column = 'miss', value = demo_data['miss'], allow_duplicates=True) hit_miss_data.insert(loc = 6, column = 'correct_source', value = demo_data['correct_source'], allow_duplicates=True) hit_miss_data.insert(loc = 7, column = 'wrong_source', value = demo_data['wrong_source'], allow_duplicates=True) hit_miss_data.insert(loc = 8, column = 'dprime', value = demo_data['dprime'], allow_duplicates=True) hit_miss_data.insert(loc = 9, column = 'associative_memScore', value = demo_data['associative_memScore'], allow_duplicates=True) hit_miss_data.to_csv(os.path.join(output_dir, 'SVC_withinSub_hit_miss_wholeBrain.tsv'), sep='\t', header=True, index=False) # + # CORRECT SOURCE VERSUS WRONG SOURCE TRIAL CLASSIFICATION # build data structure to store accuracy data and coefficients cs_ws_data = pd.DataFrame() cs_ws_data.insert(loc = 0, column = 'dccid', value = 'None', allow_duplicates=True) # cs_ws_data.insert(loc = 1, column = 'diagnosis', value = 'None', allow_duplicates=True) for i in range(0, 7): cs_ws_data.insert(loc = cs_ws_data.shape[1], column = 'CV'+str(i+1)+'_acc', value = NaN, allow_duplicates=True) cs_ws_data.insert(loc = cs_ws_data.shape[1], column = 'TrainSet_MeanCV_acc', value = 'None', allow_duplicates=True) cs_ws_data.insert(loc = cs_ws_data.shape[1], column = 'TestSet_acc', value = 'None', allow_duplicates=True) for sub in cw_subs: print(sub) s_data = [sub] # load subject's beta maps (one per trial) betas = image.load_img(img=os.path.join(beta_dir, str(sub), 'TrialContrasts/betas_sub'+str(sub)+'*.nii'), wildcards=True) # initialize NiftiLabelMasker object sub_mask = nb.load(os.path.join(mask_dir, 'func_sub'+str(sub)+'_mask_stereonl.nii')) sub_masker = NiftiMasker(mask_img=sub_mask, standardize=True) # transform subject's beta maps into 
vector of network means per trial X_cs_ws_miss_ctl = sub_masker.fit_transform(betas) # load subject's trial labels labels_file = os.path.join(label_dir, 'sub-'+str(sub)+'_ctl_miss_ws_cs.tsv') y_cs_ws_miss_ctl = pd.read_csv(labels_file, sep='\t') y_cs_ws_miss_ctl_labels = y_cs_ws_miss_ctl['ctl_miss_ws_cs'] # mask X and y data to exclude trials of no interest cs_ws_mask = y_cs_ws_miss_ctl_labels.isin(['correctsource', 'wrongsource']) y_cs_ws = y_cs_ws_miss_ctl_labels[cs_ws_mask] X_cs_ws = X_cs_ws_miss_ctl[cs_ws_mask] # Split trials into a training and a test set X_train, X_test, y_train, y_test = train_test_split( X_cs_ws, # x y_cs_ws, # y test_size = 0.4, # 60%/40% split shuffle = True, # shuffle dataset before splitting stratify = y_cs_ws, # keep distribution of conditions consistent betw. train & test sets #random_state = 123 # if set number, same shuffle each time, otherwise randomization algo ) print('training:', len(X_train), 'testing:', len(X_test)) print(y_train.value_counts(), y_test.value_counts()) # define the model sub_svc = SVC(kernel='linear', class_weight='balanced') # do cross-validation to evaluate model performance # within 10 folds of training set # predict y_pred = cross_val_predict(sub_svc, X_train, y_train, groups=y_train, cv=7) # scores cv_acc = cross_val_score(sub_svc, X_train, y_train, groups=y_train, cv=7) print(cv_acc) for i in range(0, len(cv_acc)): s_data.append(cv_acc[i]) # evaluate overall model performance on training data overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) s_data.append(overall_acc) # Test model on unseen data from the test set sub_svc.fit(X_train, y_train) y_pred = sub_svc.predict(X_test) # classify age class using testing data acc = sub_svc.score(X_test, y_test) # get accuracy cr = classification_report(y_pred=y_pred, y_true=y_test) # get prec., recall & f1 # print results 
print('accuracy =', acc) print(cr) s_data.append(acc) # get map of coefficients # coef_ = sub_svc.coef_ # print(coef_.shape) #Return voxel weights into a nifti image using the NiftiMasker # coef_img = sub_masker.inverse_transform(coef_) #Save .nii to file # coef_img.to_filename(os.path.join(output_dir, 'Coef_maps', 'SVC_coeff_cs_ws_sub-'+str(sub)+'.nii')) cs_ws_data = cs_ws_data.append(pd.Series(s_data, index=cs_ws_data.columns), ignore_index=True) demo_data = cw_data.copy() demo_data.reset_index(level=None, drop=False, inplace=True) cs_ws_data.insert(loc = 1, column = 'cognitive_status', value = demo_data['cognitive_status'], allow_duplicates=True) cs_ws_data.insert(loc = 2, column = 'total_scrubbed_frames', value = demo_data['total_scrubbed_frames'], allow_duplicates=True) cs_ws_data.insert(loc = 3, column = 'mean_FD', value = demo_data['mean_FD'], allow_duplicates=True) cs_ws_data.insert(loc = 4, column = 'hits', value = demo_data['hits'], allow_duplicates=True) cs_ws_data.insert(loc = 5, column = 'miss', value = demo_data['miss'], allow_duplicates=True) cs_ws_data.insert(loc = 6, column = 'correct_source', value = demo_data['correct_source'], allow_duplicates=True) cs_ws_data.insert(loc = 7, column = 'wrong_source', value = demo_data['wrong_source'], allow_duplicates=True) cs_ws_data.insert(loc = 8, column = 'dprime', value = demo_data['dprime'], allow_duplicates=True) cs_ws_data.insert(loc = 9, column = 'associative_memScore', value = demo_data['associative_memScore'], allow_duplicates=True) cs_ws_data.to_csv(os.path.join(output_dir, 'SVC_withinSub_cs_ws_wholeBrain.tsv'), sep='\t', header=True, index=False) # + # CORRECT SOURCE VERSUS MISSED TRIAL CLASSIFICATION # build data structure to store accuracy data and coefficients cs_miss_data = pd.DataFrame() cs_miss_data.insert(loc = 0, column = 'dccid', value = 'None', allow_duplicates=True) # cs_miss_data.insert(loc = 1, column = 'diagnosis', value = 'None', allow_duplicates=True) for i in range(0, 7): 
cs_miss_data.insert(loc = cs_miss_data.shape[1], column = 'CV'+str(i+1)+'_acc', value = NaN, allow_duplicates=True) cs_miss_data.insert(loc = cs_miss_data.shape[1], column = 'TrainSet_MeanCV_acc', value = 'None', allow_duplicates=True) cs_miss_data.insert(loc = cs_miss_data.shape[1], column = 'TestSet_acc', value = 'None', allow_duplicates=True) for sub in cmiss_subs: print(sub) s_data = [sub] # load subject's beta maps (one per trial) betas = image.load_img(img=os.path.join(beta_dir, str(sub), 'TrialContrasts/betas_sub'+str(sub)+'*.nii'), wildcards=True) # initialize NiftiLabelMasker object sub_mask = nb.load(os.path.join(mask_dir, 'func_sub'+str(sub)+'_mask_stereonl.nii')) sub_masker = NiftiMasker(mask_img=sub_mask, standardize=True) # transform subject's beta maps into vector of network means per trial X_cs_ws_miss_ctl = sub_masker.fit_transform(betas) # load subject's trial labels labels_file = os.path.join(label_dir, 'sub-'+str(sub)+'_ctl_miss_ws_cs.tsv') y_cs_ws_miss_ctl = pd.read_csv(labels_file, sep='\t') y_cs_ws_miss_ctl_labels = y_cs_ws_miss_ctl['ctl_miss_ws_cs'] # mask X and y data to exclude trials of no interest cs_miss_mask = y_cs_ws_miss_ctl_labels.isin(['correctsource', 'missed']) y_cs_miss = y_cs_ws_miss_ctl_labels[cs_miss_mask] X_cs_miss = X_cs_ws_miss_ctl[cs_miss_mask] # Split trials into a training and a test set X_train, X_test, y_train, y_test = train_test_split( X_cs_miss, # x y_cs_miss, # y test_size = 0.4, # 60%/40% split shuffle = True, # shuffle dataset before splitting stratify = y_cs_miss, # keep distribution of conditions consistent betw. 
train & test sets #random_state = 123 # if set number, same shuffle each time, otherwise randomization algo ) print('training:', len(X_train), 'testing:', len(X_test)) print(y_train.value_counts(), y_test.value_counts()) # define the model sub_svc = SVC(kernel='linear', class_weight='balanced') # do cross-validation to evaluate model performance # within 10 folds of training set # predict y_pred = cross_val_predict(sub_svc, X_train, y_train, groups=y_train, cv=7) # scores cv_acc = cross_val_score(sub_svc, X_train, y_train, groups=y_train, cv=7) print(cv_acc) for i in range(0, len(cv_acc)): s_data.append(cv_acc[i]) # evaluate overall model performance on training data overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) s_data.append(overall_acc) # Test model on unseen data from the test set sub_svc.fit(X_train, y_train) y_pred = sub_svc.predict(X_test) # classify age class using testing data acc = sub_svc.score(X_test, y_test) # get accuracy cr = classification_report(y_pred=y_pred, y_true=y_test) # get prec., recall & f1 # print results print('accuracy =', acc) print(cr) s_data.append(acc) # get map of coefficients # coef_ = sub_svc.coef_ # print(coef_.shape) #Return voxel weights into a nifti image using the NiftiMasker # coef_img = sub_masker.inverse_transform(coef_) #Save .nii to file # coef_img.to_filename(os.path.join(output_dir, 'Coef_maps', 'SVC_coeff_cs_ws_sub-'+str(sub)+'.nii')) cs_miss_data = cs_miss_data.append(pd.Series(s_data, index=cs_miss_data.columns), ignore_index=True) demo_data = cmiss_data.copy() demo_data.reset_index(level=None, drop=False, inplace=True) cs_miss_data.insert(loc = 1, column = 'cognitive_status', value = demo_data['cognitive_status'], allow_duplicates=True) cs_miss_data.insert(loc = 2, column = 'total_scrubbed_frames', value = demo_data['total_scrubbed_frames'], allow_duplicates=True) 
cs_miss_data.insert(loc = 3, column = 'mean_FD', value = demo_data['mean_FD'], allow_duplicates=True) cs_miss_data.insert(loc = 4, column = 'hits', value = demo_data['hits'], allow_duplicates=True) cs_miss_data.insert(loc = 5, column = 'miss', value = demo_data['miss'], allow_duplicates=True) cs_miss_data.insert(loc = 6, column = 'correct_source', value = demo_data['correct_source'], allow_duplicates=True) cs_miss_data.insert(loc = 7, column = 'wrong_source', value = demo_data['wrong_source'], allow_duplicates=True) cs_miss_data.insert(loc = 8, column = 'dprime', value = demo_data['dprime'], allow_duplicates=True) cs_miss_data.insert(loc = 9, column = 'associative_memScore', value = demo_data['associative_memScore'], allow_duplicates=True) cs_miss_data.to_csv(os.path.join(output_dir, 'SVC_withinSub_cs_miss_wholeBrain.tsv'), sep='\t', header=True, index=False) # -
models/CIMAQ_withinSubject_wholeBrain_SVM_wGroupStats.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""Bar chart of per-class accuracy for six Indian classical dance forms."""

import matplotlib.pyplot as plt
import numpy as np

# Class-wise accuracy (%) for each of the six dance forms, in x-tick order.
money = [34.52380952, 27.5, 52.14285714, 37.09677419, 93.84615385, 62.5]

# Bug fix: `x` was previously used by plt.bar() before it was assigned
# (NameError); the bar positions must be defined before any plotting.
x = np.arange(6)

# The original first chart ('Bill', 'Fred', 'Mary', 'Sue') was stale demo
# code: it supplied 4 tick labels for 6 bar positions (a ValueError) and
# could never have run anyway because `x` was undefined at that point, so
# it has been removed. Only the intended dance-form chart is drawn.

f, ax = plt.subplots(figsize=(7, 5))
plt.bar(x, money, fc='red')
plt.xticks(x, ('Bharatnatyam', 'Kathak', 'Kuchipudi', 'Manipuri',
               'Mohiniattam', 'Odissi'))
plt.xlabel('Dance Forms')
plt.ylabel('Class-wise Accuracy')
# Typo fix in the displayed title: 'feeded' -> 'fed'.
plt.title('Model: Features extracted from Inception v3 fed into LSTM network')
plt.show()
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # @USRebellion1776 claims that the Michigan and Georgia democratic ballots
# could be suspicious due to differences in senate votes versus presidential
# votes cast in voter ballots.
#
# ## Let's check this out with some data!
# Story: https://www.zerohedge.com/political/why-does-biden-have-so-many-more-votes-democrat-senators-swing-states?fbclid=IwAR1qj4L3RstEHmxr_FIHkeV4GPXmJsd4kX7fU8yIQMawJ2VSj3JiFZbl8Tk
# Data Sources:
# https://electionlab.mit.edu/data and US Census Bureau
#
# Skip down to "Main Questions" section for the TLDR if you don't care about
# the data carpentry, shaping, and calculations.

import pandas as pd
# Fix: numpy is used below (np.abs, np.mean, np.std) but was never imported,
# which raised a NameError at the outlier-removal step.
import numpy as np
from scipy import stats

# State votes for President and Senate
senate = pd.read_csv('senate.csv', encoding='latin-1')
senate.head()

president = pd.read_csv('president.csv', encoding='latin-1')
president.head()

# Rename each race's `candidatevotes` column so the senate and presidential
# frames keep distinct vote columns after the merge.
senate.rename(columns={'candidatevotes': 'senatevotes'}, inplace=True)
president.rename(columns={'candidatevotes': 'presidentvotes'}, inplace=True)

# Select the columns we care about (year, state, party, candidate votes) for
# each data frame and then merge together.
senate_trim = senate[['year', 'state', 'party', 'senatevotes']]
president_trim = president[['year', 'state', 'party', 'presidentvotes']]
general_election = pd.merge(senate_trim, president_trim,
                            on=['year', 'state', 'party']).dropna()
general_election.head()

# `president_no_senate_diff` is the number of voters who voted for a president
# but not a senator; `president_no_senate_prop` expresses the same idea as a
# ratio of presidential to senate votes.
general_election['president_no_senate_diff'] = (
    general_election['presidentvotes'] - general_election['senatevotes'])
general_election['president_no_senate_prop'] = (
    general_election['presidentvotes'] / general_election['senatevotes'])

# Let's look at republicans vs democrats only.
general_election = general_election.loc[
    (general_election['party'] == 'republican')
    | (general_election['party'] == 'democrat')]
general_election.head()

dems = general_election.loc[general_election['party'] == 'democrat']
reps = general_election.loc[general_election['party'] == 'republican']

# Remove outliers from the datasets: keep rows within 3 standard deviations on
# every numeric column (take roughly only 99.4% of the data).
# Fix: select_dtypes('int64', 'float64') passed 'float64' positionally as the
# *exclude* argument, so the float ratio column was silently left out of the
# z-score screen; the intent was to include both numeric dtypes.
dems = dems[(np.abs(stats.zscore(
    dems.select_dtypes(include=['int64', 'float64']))) < 3).all(axis=1)]
reps = reps[(np.abs(stats.zscore(
    reps.select_dtypes(include=['int64', 'float64']))) < 3).all(axis=1)]

# ## Bring in 2020 Data for Michigan and Georgia
# 2020 data is not quite available but let's look at the two states in this
# article: Georgia and Michigan
# Source: https://abcnews.go.com/Elections/2020-us-presidential-election-results-live-map

# 2020 data pulled from FiveThirtyEight....lets look
georgia_rep_pres = 2454729
georgia_rep_senate = 2455583
georgia_dem_pres = 2463889
georgia_dem_senate = 2364345
michigan_dem_pres = 2794853
michigan_dem_senate = 2722724
michigan_rep_pres = 2646956
michigan_rep_senate = 2636667

# Senate versus President vote diff proportion calculations
georgia_dem_pres_senate_prop = georgia_dem_pres / georgia_dem_senate
georgia_rep_pres_senate_prop = georgia_rep_pres / georgia_rep_senate
print('Georgia Democratic President, No Senate Vote Proportion: ', georgia_dem_pres_senate_prop)
print('Georgia Republican President, No Senate Proportion: ', georgia_rep_pres_senate_prop)

michigan_dem_pres_senate_prop = michigan_dem_pres / michigan_dem_senate
michigan_rep_pres_senate_prop = michigan_rep_pres / michigan_rep_senate
print('Michigan Democratic President, No Senate Vote: ', michigan_dem_pres_senate_prop)
print('Michigan Republican President, No Senate Vote: ', michigan_rep_pres_senate_prop)

# ## QUESTION 1:
# Are Georgia Democrats abnormally voting for the president but not the senate
# this year? What percentile would the 2020 President to Senate voting
# proportions rank historically against elections going back to 1976?
ga_dem2020_z_score = (
    (georgia_dem_pres_senate_prop - np.mean(dems['president_no_senate_prop']))
    / np.std(dems['president_no_senate_prop']))
stats.norm.cdf(ga_dem2020_z_score)

# Answer: No. This year's proportion of ballots voting for president but not
# for the senate versus ballots that voted for both offices ranks in the 57th
# percentile, meaning that 43% of historical elections saw a higher proportion
# of ballots that voted for the presidential race but not the senate race.

# ## Question 2:
# Are Michigan Democrats abnormally voting for the president but not the
# senate this year? What percentile would the 2020 President to Senate voting
# proportions rank historically against elections going back to 1976?
mich_dem2020_z_score = (
    (michigan_dem_pres_senate_prop - np.mean(dems['president_no_senate_prop']))
    / np.std(dems['president_no_senate_prop']))
stats.norm.cdf(mich_dem2020_z_score)

# Answer for Michigan: No. This year's proportion of ballots voting for
# president but not for the senate versus ballots that voted for both offices
# ranks in the 55th percentile, meaning that 45% of historical elections saw a
# higher proportion of ballots that voted for the presidential race but not
# the senate race.
Blank Ballot Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- # + # imports # # %matplotlib inline # from ipywidgets import HBox, VBox, IntSlider, interactive_output, FileUpload, Button, Layout, Box, Label, AppLayout, interactive # import ipywidgets as wg # from IPython.display import display # import matplotlib.pyplot as plt # import numpy as np # import ipympl # header: Markdown # left: SideBar # center: Output # right: None # footer: Markdown # + # # Items flex proportionally to the weight and the left over space around the text # items_auto = [ # Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'), # Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'), # Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'), # ] # # Items flex proportionally to the weight # items_0 = [ # Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'), # Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'), # Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'), # ] # box_layout = Layout(display='flex', # flex_flow='row', # align_items='stretch', # width='70%') # box_auto = Box(children=items_auto, layout=box_layout) # box_0 = Box(children=items_0, layout=box_layout) # display(VBox([box_auto, box_0])) # + # items_layout = Layout( width='auto') # override the default width of the button to 'auto' to let the button grow # box_layout = Layout(display='flex', # flex_flow='column', # align_items='stretch', # border='solid', # width='50%') # words = ['correct', 'horse', 'battery', 'staple'] # items = [Button(description=word, layout=items_layout, 
button_style='danger') for word in words] # box = Box(children=items, layout=box_layout) # box # + # item_layout = Layout(height='100px', min_width='40px') # items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)] # box_layout = Layout(overflow='scroll hidden', # border='3px solid black', # width='500px', # height='', # flex_flow='row', # display='flex') # carousel = Box(children=items, layout=box_layout) # VBox([Label('Scroll horizontally:'), carousel]) # + # # UAPET Title: # title_html = 'Underwater Animal Pose Estimation and Tracking' # # UAPET Sidebar Items: # # Load Dataframe # sidebar_items = [ # FileUpload(accept='csv', multiple=False, layout=Layout(width='auto')), # Button(description='Load Dataframe', layout=Layout(width='auto'), button_style='danger'), # ] # sidebar_layout = Layout( # display='flex', # flex_flow='column', # align_items='stretch', # width='30%', # height='auto', # ) # sidebar = Box(children=sidebar_items, layout=sidebar_layout) # VBox([title_html, sidebar]) # # UAPET Output Items: # # View Dataframe # + # def plot_func(a, f): # plt.figure(2) # x = np.linspace(0, 2*np.pi, num=1000) # y = a*np.sin(1/f*x) # plt.plot(x,y) # plt.ylim(-1.1, 1.1) # plt.title('a sin(f)') # plt.show() # interactive_plot = interactive(plot_func, a=(-1,0,0.1), f=(0,10,0.1)) # output = interactive_plot.children[-1]
spare_voila_examples.ipynb
# + from IPython.display import HTML HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> <form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''') # - #import libraries import warnings warnings.filterwarnings("ignore",category=FutureWarning) warnings.filterwarnings("ignore", message="numpy.dtype size changed") import scipy.io as spio import scipy.signal import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from collections import namedtuple import math import re import pandas as pd import os import glob from os.path import expanduser import datetime import statistics from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.graph_objs as go init_notebook_mode(connected=True) def color_negative_red(val): color = 'red' if val > 110 else 'black' return 'color: %s' % color class Keypoint: tag = "" parent = [''] child = [''] point = None def __init__(self,tag=None,parent=None,child=None,point=None): if tag is not None: self.tag = tag if parent is not None: self.parent = parent if child is not None: self.child = child if point is not None: self.point = point class Skeleton: keypoints = [Keypoint() for i in range(17)] tag2id = { "shoulderCenter" : 0, "head" : 1, "shoulderLeft" : 2, "elbowLeft" : 3, "handLeft" : 4, "shoulderRight" : 5, "elbowRight" : 6, "handRight" : 7, "hipCenter" : 8, "hipLeft" : 9, "kneeLeft" : 10, "ankleLeft" : 11, "footLeft" : 12, "hipRight" : 13, "kneeRight" : 14, "ankleRight" : 15, "footRight" : 16, } keypoints[tag2id["shoulderCenter"]] = Keypoint("shoulderCenter",[''],['head','shoulderLeft','shoulderRight','hipCenter']) keypoints[tag2id["head"]] = Keypoint("head",['shoulderCenter'],['']) keypoints[tag2id["shoulderLeft"]] = 
Keypoint("shoulderLeft",['shoulderCenter'],['elbowLeft']) keypoints[tag2id["elbowLeft"]] = Keypoint("elbowLeft",['shoulderLeft'],['handLeft']) keypoints[tag2id["handLeft"]] = Keypoint("handLeft",['elbowLeft'],['']) keypoints[tag2id["shoulderRight"]] = Keypoint("shoulderRight",['shoulderCenter'],['elbowRight']) keypoints[tag2id["elbowRight"]] = Keypoint("elbowRight",['shoulderRight'],['handRight']) keypoints[tag2id["handRight"]] = Keypoint("handRight",['elbowRight'],['']) keypoints[tag2id["hipCenter"]] = Keypoint("hipCenter",['shoulderCenter'],['hipLeft','hipRight']) keypoints[tag2id["hipLeft"]] = Keypoint("hipLeft",['shoulderCenter'],['kneeLeft']) keypoints[tag2id["kneeLeft"]] = Keypoint("kneeLeft",['hipLeft'],['ankleLeft']) keypoints[tag2id["ankleLeft"]] = Keypoint("ankleLeft",['kneeLeft'],['footLeft']) keypoints[tag2id["footLeft"]] = Keypoint("footLeft",['ankleLeft'],['']) keypoints[tag2id["hipRight"]] = Keypoint("hipRight",['shoulderCenter'],['kneeRight']) keypoints[tag2id["kneeRight"]] = Keypoint("kneeRight",['hipRight'],['ankleRight']) keypoints[tag2id["ankleRight"]] = Keypoint("ankleRight",['kneeRight'],['footRight']) keypoints[tag2id["footRight"]] = Keypoint("footRight",['ankleRight'],['']) def __init__(self,keyp_map=None): if keyp_map is not None: for i in range(len(keyp_map)): tag = keyp_map.keys()[i] self.keypoints[self.tag2id[tag]].point = keyp_map[tag] def getKeypoint(self,keyp_tag): return self.keypoints[self.tag2id[keyp_tag]].point def getChild(self,keyp_tag): return self.keypoints[self.tag2id[keyp_tag]].child def getParent(self,keyp_tag): return self.keypoints[self.tag2id[keyp_tag]].parent def getTransformation(self): sagittal = None coronal = None transverse = None T = np.eye(4,4) if self.getKeypoint("shoulderLeft") is not None: if self.getKeypoint("shoulderRight") is not None: sagittal = self.getKeypoint("shoulderLeft")[0]-self.getKeypoint("shoulderRight")[0] sagittal = sagittal/np.linalg.norm(sagittal) if self.getKeypoint("shoulderCenter") is not 
None: if self.getKeypoint("hipLeft") is not None: if self.getKeypoint("hipRight") is not None: transverse = self.getKeypoint("shoulderCenter")[0]-0.5*(self.getKeypoint("hipLeft")[0]+self.getKeypoint("hipRight")[0]) transverse = transverse/np.linalg.norm(transverse) if self.getKeypoint("shoulderCenter") is not None: pSC = self.getKeypoint("shoulderCenter")[0] if sagittal is not None: if coronal is not None: coronal = np.cross(sagittal,transverse) T[0,0]=coronal[0] T[1,0]=coronal[1] T[2,0]=coronal[2] T[0,1]=sagittal[0] T[1,1]=sagittal[1] T[2,1]=sagittal[2] T[0,2]=transverse[0] T[1,2]=transverse[1] T[2,2]=transverse[2] T[0,3]=pSC[0] T[1,3]=pSC[1] T[2,3]=pSC[2] T[3,3]=1 return T def show(self): for i in range(len(self.keypoints)): k = self.keypoints[i] print "keypoint[", k.tag, "]", "=", k.point # + class Exercise: name = "" typee = "" metrics = [] class Tug(Exercise): name = "tug" typee = "test" metrics = ["ROM_0","ROM_1","ROM_2","ROM_3","ROM_4","ROM_5","step_0"] result = [] month_res = { 0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [] } def __init__(self,month,result): self.result = result self.month_res[month] = result def getResult(self,month): return self.month_res[month] class Abduction(Exercise): name = "abduction" typee = "rehabilitation" metrics = ["ROM_0"] result = [] month_res = { 0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [] } def __init__(self,name,monthi,result): self.name = name self.result = result self.month_res[monthi] = result def getResult(self,month): return self.month_res[month] class Internal_Rotation(Exercise): name = "internal_rotation" typee = "rehabilitation" metrics = ["ROM_0"] result = [] month_res = { 0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [] } def __init__(self,name,monthi,result): self.name = name self.result = result self.month_res[monthi] = result def getResult(self,month): return self.month_res[month] class 
External_Rotation(Exercise): name = "external_rotation" typee = "rehabilitation" metrics = ["ROM_0"] result = [] month_res = { 0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [] } def __init__(self,name,monthi,result): self.name = name self.result = result self.month_res[monthi] = result def getResult(self,month): return self.month_res[month] class Reaching(Exercise): name = "reaching" typee = "rehabilitation" metrics = ["EP_0"] result = [] month_res = { 0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [] } def __init__(self,name,monthi,result): self.name = name self.result = result self.month_res[monthi] = result def getResult(self,month): return self.month_res[month] # + class Metric: name = '' def __init__(self,name): self.name = name class Rom(Metric): name = "ROM" tagjoint = "" refjoint = "" refdir = [] tagplane = "" def __init__(self,tagjoint,refjoint,refdir,tagplane): self.tagjoint = tagjoint self.refjoint = refjoint self.refdir = refdir self.tagplane = tagplane def compute(self,skeleton): #joint ref and child tj = skeleton.getKeypoint(self.tagjoint) tagchild = skeleton.getChild(self.tagjoint)[0] cj = skeleton.getKeypoint(tagchild) xrj = [] yrj = [] zrj = [] if self.refjoint != "": rj = skeleton.getKeypoint(self.refjoint) xrj = rj[:,0] yrj = rj[:,1] zrj = rj[:,2] #compute metric x=tj[:,0] y=tj[:,1] z=tj[:,2] xchild=cj[:,0] ychild=cj[:,1] zchild=cj[:,2] #plane over which we want to evaluate the metric plane = np.zeros(3) if tagplane == "coronal": plane[0] = 1.0 if tagplane == "sagittal": plane[1] = 1.0 if tagplane == "transverse": plane[2] = 1.0 #project v1 on the right plane invT = np.linalg.inv(skeleton.getTransformation()) cosRom = [] for i in range(len(x)): temp_ref = np.array([x[i],y[i],z[i],1]) temp_child = np.array([xchild[i],ychild[i],zchild[i],1]) transf_ref = np.inner(invT,temp_ref) transf_child = np.inner(invT,temp_child) vprocess = transf_child-transf_ref vprocess = 
np.delete(vprocess,3) dist = np.dot(vprocess,np.transpose(plane)) vprocess = vprocess-dist*plane n1 = np.linalg.norm(vprocess) if(n1>0): vprocess = vprocess/n1 if len(xrj)>0: temp_refjoint = np.array([xrj[i],yrj[i],zrj[i],1]) transf_refjoint = np.inner(invT,temp_refjoint) vecref = transf_ref - transf_refjoint ref = np.delete(vecref,3) else: n2 = np.linalg.norm(self.refdir) if(n2>0): self.refdir = self.refdir/n2 ref = self.refdir dotprod = np.dot(vprocess,np.transpose(ref)) cosRom.append(dotprod) rom_value = np.arccos(cosRom) result = rom_value *(180/math.pi) return result class Step(Metric): name = "step" num = [] den = [] tstart = 0.0 tend = 0.0 steplen = [] nsteps = 0 cadence = 0.0 speed = 0.0 ex_time = 0.0 def __init__(self,num,den,tstart,tend): self.num = num self.den = den self.tstart = tstart self.tend = tend def compute(self,skeleton): alj = skeleton.getKeypoint("ankleLeft") arj = skeleton.getKeypoint("ankleRight") xl=alj[:,0] yl=alj[:,1] zl=alj[:,2] xr=arj[:,0] yr=arj[:,1] zr=arj[:,2] invT = np.linalg.inv(skeleton.getTransformation()) distfeet = [] for i in range(len(xl)): temp_left = np.array([xl[i],yl[i],zl[i],1]) temp_right = np.array([xr[i],yr[i],zr[i],1]) transf_left = np.inner(invT,temp_left) transf_right = np.inner(invT,temp_right) v = transf_left-transf_right distfeet.append(np.linalg.norm(v)) filtered_distfeet = scipy.signal.filtfilt(self.num,self.den,distfeet) strikes,_ = scipy.signal.find_peaks(filtered_distfeet) filtered_distfeet_np = np.array(filtered_distfeet) slen = filtered_distfeet_np[strikes] self.steplen = statistics.mean(slen) self.nsteps = len(strikes) self.cadence = self.nsteps/(self.tend-self.tstart) self.speed = self.steplen*self.cadence self.ex_time = self.tend-self.tstart class EndPoint(Metric): name = "EP" tagjoint = "" refdir = [] tagplane = "" target = [] trajectories = [] tstart = 0.0 tend = 0.0 speed = 0.0 ex_time = 0.0 def __init__(self,tagjoint,refdir,tagplane,target,tstart,tend): self.tagjoint = tagjoint self.refdir = 
refdir self.tagplane = tagplane self.target = target self.tstart = tstart self.tend = tend def compute(self,skeleton): self.ex_time = self.tend-self.tstart tj = skeleton.getKeypoint(self.tagjoint) x = tj[:,0] y = tj[:,1] z = tj[:,2] plane = np.zeros(3) if tagplane == "coronal": plane[0] = 1.0 if tagplane == "sagittal": plane[1] = 1.0 if tagplane == "transverse": plane[2] = 1.0 invT = np.linalg.inv(skeleton.getTransformation()) self.trajectories = np.zeros([len(x),3]) for i in range(len(x)): temp_jnt = np.array([x[i],y[i],z[i],1]) transf_jnt = np.inner(invT,temp_jnt) v = np.delete(transf_jnt,3) dist = np.dot(v,np.transpose(plane)) v = v-dist*plane self.trajectories[i,0]=v[0] self.trajectories[i,1]=v[1] self.trajectories[i,2]=v[2] vel = np.zeros([len(x),3]) vel[:,0] = np.gradient(self.trajectories[:,0])/self.ex_time vel[:,1] = np.gradient(self.trajectories[:,1])/self.ex_time vel[:,2] = np.gradient(self.trajectories[:,2])/self.ex_time self.speed = 0.0 for i in range(len(x)): self.speed = self.speed + np.linalg.norm([vel[i,0],vel[i,1],vel[i,2]]) self.speed = self.speed/len(x) # + def loadmat(filename): ''' this function should be called instead of direct spio.loadmat as it cures the problem of not properly recovering python dictionaries from mat files. It calls the function check keys to cure all entries which are still mat-objects ''' data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True) return _check_keys(data) def _check_keys(dict): ''' checks if entries in dictionary are mat-objects. 
If yes todict is called to change them to nested dictionaries ''' for key in dict: if isinstance(dict[key], spio.matlab.mio5_params.mat_struct): dict[key] = _todict(dict[key]) return dict def _todict(matobj): ''' A recursive function which constructs from matobjects nested dictionaries ''' dict = {} for strg in matobj._fieldnames: elem = matobj.__dict__[strg] if isinstance(elem, spio.matlab.mio5_params.mat_struct): dict[strg] = _todict(elem) else: dict[strg] = elem return dict # - # # ## Dati anagrafici # + #load file home = expanduser("~") pth = home + '/.local/share/yarp/contexts/motionAnalyzer' files = glob.glob(os.path.join(pth, '*.mat')) lastfile = max(files, key=os.path.getctime) print lastfile #print personal data i = [pos for pos, char in enumerate(lastfile) if char == "-"] i1 = i[-3] i2 = i[-2] name = lastfile[i1+1:i2] surname = "" age = "" personaldata = [] personaldata.append(name) personaldata.append(surname) personaldata.append(age) table = pd.DataFrame(personaldata) table.rename(index={0:"Nome",1:"Cognome",2:"Etร "}, columns={0:"Paziente"}, inplace=True) display(table) # + data = [] ctime = [] filename = [] tagex = [] files.sort(key=os.path.getctime) for fi in files: i = [pos for pos, char in enumerate(fi) if char == "-"] i1 = i[-3] i2 = i[-2] i3 = i[-1] namei = fi[i1+1:i2] if namei == name: filename.append(fi) data.append(loadmat(fi)) #data.append(scipy.io.loadmat(fi)) tagex.append(fi[i2+1:i3]) ctime.append(os.path.getctime(fi)) # + time = [] month = [] exercises = [] ex_names = [] #count how many exercise of the same type were performed at that month countexmonth = { "tug" : [0,0,0,0,0,0,0,0,0,0,0,0], "abduction_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "internal_rotation_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "external_rotation_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "reaching_left" : [0,0,0,0,0,0,0,0,0,0,0,0] } for i in range(len(data)): datai = data[i] time.append(datai['Time_samples']) monthi = datetime.date.fromtimestamp(ctime[i]).month-1 month.append(monthi) 
shoulderCenter = datai['Keypoints']['shoulderCenter'] head = datai['Keypoints']['head'] shoulderLeft = datai['Keypoints']['shoulderLeft'] shoulderRight = datai['Keypoints']['shoulderRight'] elbowLeft = datai['Keypoints']['elbowLeft'] handLeft = datai['Keypoints']['handLeft'] elbowRight = datai['Keypoints']['elbowRight'] handRight = datai['Keypoints']['handRight'] hipLeft = datai['Keypoints']['hipLeft'] hipRight = datai['Keypoints']['hipRight'] ankleLeft = datai['Keypoints']['ankleLeft'] ankleRight = datai['Keypoints']['ankleRight'] kneeLeft = datai['Keypoints']['kneeLeft'] kneeRight = datai['Keypoints']['kneeRight'] footLeft = datai['Keypoints']['footLeft'] footRight = datai['Keypoints']['footRight'] hipCenter = datai['Keypoints']['hipCenter'] key_pam = { "shoulderCenter" : shoulderCenter, "head" : head, "shoulderLeft" : shoulderLeft, "shoulderRight" : shoulderRight, "elbowLeft" : elbowLeft, "handLeft" : handLeft, "elbowRight" : elbowRight, "handRight" : handRight, "hipLeft" : hipLeft, "hipRight" : hipRight, "ankleLeft" : ankleLeft, "ankleRight" : ankleRight, "kneeLeft" : kneeLeft, "kneeRight" : kneeRight, "footLeft" : footLeft, "footRight" : footRight, "hipCenter" : hipCenter } s=Skeleton(key_pam) #s.show() exname = datai["Exercise"]["name"] exname = re.sub(r'[^\w]','',exname) ex_names.append(exname) result_singleexercise = [] allmet = datai["Exercise"]["metrics"] metrics = allmet.keys() for j in range(len(metrics)): metname = metrics[j] if "ROM" in metname: tagjoint = allmet[metname]["tag_joint"] tagjoint = re.sub(r'[^\w]', '',tagjoint) refjoint = allmet[metname]["ref_joint"] refjoint = re.sub(r'[^\w]', '',refjoint) if type(refjoint) is np.ndarray: refjoint = "" refdir = allmet[metname]["ref_dir"] tagplane = allmet[metname]["tag_plane"] tagplane = re.sub(r'[^\w]', '',tagplane) rom = Rom(tagjoint,refjoint,refdir,tagplane) result_singleexercise.append((rom,rom.compute(s))) if "step" in metname: num = allmet[metname]["num"] den = allmet[metname]["den"] tstart = 
allmet[metname]["tstart"] tend = allmet[metname]["tend"] step = Step(num,den,tstart,tend) step.compute(s) stepmet = [step.steplen,step.nsteps,step.cadence,step.speed,step.ex_time] result_singleexercise.append((step,stepmet)) if "EP" in metname: tagjoint = allmet[metname]["tag_joint"] tagjoint = re.sub(r'[^\w]', '',tagjoint) refdir = allmet[metname]["ref_dir"] tagplane = allmet[metname]["tag_plane"] tagplane = re.sub(r'[^\w]', '',tagplane) target = allmet[metname]["target"] tstart = allmet[metname]["tstart"] tend = allmet[metname]["tend"] ep = EndPoint(tagjoint,refdir,tagplane,target,tstart,tend) ep.compute(s) result_singleexercise.append((ep,ep.trajectories)) if exname == "tug": ex = Tug(monthi,result_singleexercise) if "abduction" in exname: ex = Abduction(exname,monthi,result_singleexercise) if "internal_rotation" in exname: ex = Internal_Rotation(exname,monthi,result_singleexercise) if "external_rotation" in exname: ex = External_Rotation(exname,monthi,result_singleexercise) if "reaching" in exname: ex = Reaching(exname,monthi,result_singleexercise) countexmonth[exname][monthi] = 1 + countexmonth[exname][monthi] exercises.append(ex) # - # # ## Report della seduta odierna # Il paziente ha svolto il seguente esercizio: # + print exname.encode('ascii') # - # # on: # + now = datetime.datetime.now() print now # - # # Di seguito si riporta il grafico: # + lastsess_time = time[-1] lastsess_result = exercises[-1].result lastsess_res_step = [] # %matplotlib inline # %matplotlib inline for i in range(len(lastsess_result)): lastsess_met,lastsess_resi = lastsess_result[i] lastsess_metname = lastsess_met.name ################ # ROM # ################ if lastsess_metname == "ROM": lastsess_metjoint = lastsess_met.tagjoint trace1 = go.Scatter( x=lastsess_time,y=lastsess_resi, mode='lines', line=dict( color='blue', width=3 ), name='<NAME>' ) data = [trace1] layout = dict( width=750, height=600, autosize=False, title='Range of Motion '+lastsess_metjoint, 
font=dict(family='Courier New, monospace', size=22, color='black'), xaxis=dict( title='time [s]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ) ), yaxis=dict( title='ROM [gradi]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ) ) ) fig = dict(data=data, layout=layout) iplot(fig) ################ # STEP # ################ if lastsess_metname == "step": lastsess_res_step.append(lastsess_resi) tablestep = pd.DataFrame(lastsess_res_step[0]) tablestep.rename(index={0:"Lunghezza passo [m]",1:"Numero di passi",2:"Cadenza [steps/s]", 3:"Velocitร  [m/s]",4:"Tempo di esecuzione [s]"}, columns={0:"Analisi cammino"}, inplace=True) display(tablestep) ################ # EP # ################ if lastsess_metname == "EP": target = lastsess_met.target trace1 = go.Scatter3d( x=lastsess_resi[:,0], y=lastsess_resi[:,1], z=lastsess_resi[:,2], mode = 'lines', line=dict( color='blue', width=3 ), name = 'Traiettorie' ) trace2 = go.Scatter3d( x=[target[0]], y=[target[1]], z=[target[2]], mode = 'markers', marker=dict( color='red', size=5 ), name = 'Target da raggiungere' ) data = [trace1, trace2] layout = dict( margin=dict( l=0, r=0, b=0 ), title='Traiettorie End-point', font=dict(family='Courier New, monospace', size=22, color='black'), scene=dict( xaxis=dict( title='x [cm]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ), gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), yaxis=dict( title='y [cm]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ), gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), zaxis=dict( title='z [cm]', titlefont=dict( family='Courier New, monospace', 
size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ), gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), camera=dict( up=dict( x=0, y=0, z=1 ), eye=dict( x=-1.7428, y=1.0707, z=0.7100, ) ), aspectratio = dict( x=1, y=1, z=0.7 ), aspectmode = 'manual' ), ) fig = dict(data=data, layout=layout) iplot(fig) # - if exname == 'tug': table_tug = pd.DataFrame([['Normale'],['Buona mobilita'],['Ausili cammino'],['Rischio caduta']], index=['< 10 s','< 20 s','< 30 s','>= 30 s'], columns=['Tabella TUG']) display(table_tug) time_score = lastsess_res_step[0][-1] if time_score < 10: evaluation = 'Normale' color = 'green' elif time_score < 20: evaluation = 'Buona mobilitร , non necessita ausili' color = 'lightgreen' elif time_score < 30: evaluation = 'Necessita ausili cammino' color = 'yellow' elif time_score >= 30: evaluation = 'Rischio caduta' color = 'red' print "Il test รจ stato svolto in",round(time_score,2),"s" print "La valutazione รจ:",evaluation # # ## Andamento stato clinico # # Gli esercizi svolti dal paziente nei mesi in analisi sono raggruppati come segue: # + labels = ["reaching_left","abduction_left","internal-rotation_left","external-rotation_left","timed-up-and-go"] values = [tagex.count("reaching_left"), tagex.count("abduction_left"), tagex.count("internal_rotation_left"), tagex.count("external_rotation_left"), tagex.count("tug")] colors = ['#FEBFB3', '#E1396C', '#96D38C', '#D0F9B1'] trace = go.Pie(labels=labels, values=values, #hoverinfo='label+percent', textinfo='value', textfont=dict(size=20), marker=dict(colors=colors, line=dict(color='#000000', width=2)), hoverinfo="label+percent+value", hole=0.3 ) layout = go.Layout( title="Performed exercises", ) data = [trace] fig = go.Figure(data=data,layout=layout) iplot(fig) # - # # Di seguito, si riportano gli andamenti delle metriche del paziente, raggruppati per mese: # + keyp2rommax = { 
"shoulderCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "head" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "handLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "handRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "footLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "footRight" : [0,0,0,0,0,0,0,0,0,0,0,0] } keyp2rommin = { "shoulderCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "head" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "handLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "handRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "footLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "footRight" : [0,0,0,0,0,0,0,0,0,0,0,0] } countrommonth = { "shoulderCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "head" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "handLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "handRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "footLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipRight" : 
[0,0,0,0,0,0,0,0,0,0,0,0], "kneeRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "footRight" : [0,0,0,0,0,0,0,0,0,0,0,0] } keyp2rommax_avg = { "shoulderCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "head" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "handLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "handRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "footLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "footRight" : [0,0,0,0,0,0,0,0,0,0,0,0] } keyp2rommin_avg = { "shoulderCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "head" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "handLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "shoulderRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "elbowRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "handRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipCenter" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "footLeft" : [0,0,0,0,0,0,0,0,0,0,0,0], "hipRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "kneeRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "ankleRight" : [0,0,0,0,0,0,0,0,0,0,0,0], "footRight" : [0,0,0,0,0,0,0,0,0,0,0,0] } stepmonth = { "steplen" : [0,0,0,0,0,0,0,0,0,0,0,0], "numsteps" : [0,0,0,0,0,0,0,0,0,0,0,0], "cadence" : [0,0,0,0,0,0,0,0,0,0,0,0], "speed" : [0,0,0,0,0,0,0,0,0,0,0,0], "time" : [0,0,0,0,0,0,0,0,0,0,0,0] } countstepmonth = [0,0,0,0,0,0,0,0,0,0,0,0] endpointmonth = { "time" : [0,0,0,0,0,0,0,0,0,0,0,0], "speed" : [0,0,0,0,0,0,0,0,0,0,0,0] } countendpointmonth = [0,0,0,0,0,0,0,0,0,0,0,0] for i in 
range(len(exercises)): exnamei = exercises[i].name for monthi in range(12): if countexmonth[exnamei][monthi] != 0: res_exi = exercises[i].getResult(monthi) for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] single_metric_name = single_metric.name if single_metric_name == "ROM": maxromj = max(result_single_metric) minromj = min(result_single_metric) tagjoint = single_metric.tagjoint keyp2rommax[tagjoint][monthi] = maxromj + keyp2rommax[tagjoint][monthi] keyp2rommin[tagjoint][monthi] = minromj + keyp2rommin[tagjoint][monthi] countrommonth[tagjoint][monthi] = 1 + countrommonth[tagjoint][monthi] if single_metric_name == "step": stepmonth["steplen"][monthi] = single_metric.steplen + stepmonth["steplen"][monthi] stepmonth["numsteps"][monthi] = single_metric.nsteps + stepmonth["numsteps"][monthi] stepmonth["cadence"][monthi] = single_metric.cadence + stepmonth["cadence"][monthi] stepmonth["speed"][monthi] = single_metric.speed + stepmonth["speed"][monthi] stepmonth["time"][monthi] = single_metric.ex_time + stepmonth["time"][monthi] countstepmonth[monthi] = 1 + countstepmonth[monthi] if single_metric_name == "EP": endpointmonth["time"][monthi] = single_metric.ex_time + endpointmonth["time"][monthi] endpointmonth["speed"][monthi] = single_metric.speed + endpointmonth["speed"][monthi] countendpointmonth[monthi] = 1 + countendpointmonth[monthi] # + counted_exmonth = { "tug" : [0,0,0,0,0,0,0,0,0,0,0,0], "abduction_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "internal_rotation_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "external_rotation_left" : [0,0,0,0,0,0,0,0,0,0,0,0], "reaching_left" : [0,0,0,0,0,0,0,0,0,0,0,0] } for i in range(len(exercises)): exnamei = exercises[i].name for monthi in range(12): if countexmonth[exnamei][monthi] != 0: if counted_exmonth[exnamei][monthi] < 1: res_exi = exercises[i].getResult(monthi) for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] single_metric_name = single_metric.name if single_metric_name == "ROM": 
tagjoint = single_metric.tagjoint if countrommonth[tagjoint][monthi] != 0: keyp2rommax_avg[tagjoint][monthi] = keyp2rommax[tagjoint][monthi]/countrommonth[tagjoint][monthi] keyp2rommin_avg[tagjoint][monthi] = keyp2rommin[tagjoint][monthi]/countrommonth[tagjoint][monthi] if single_metric_name == "step": if countstepmonth[monthi] != 0: stepmonth["steplen"][monthi] = stepmonth["steplen"][monthi]/countstepmonth[monthi] stepmonth["numsteps"][monthi] = stepmonth["numsteps"][monthi]/countstepmonth[monthi] stepmonth["cadence"][monthi] = stepmonth["cadence"][monthi]/countstepmonth[monthi] stepmonth["speed"][monthi] = stepmonth["speed"][monthi]/countstepmonth[monthi] stepmonth["time"][monthi] = stepmonth["time"][monthi]/countstepmonth[monthi] if single_metric_name == "EP": if countendpointmonth[monthi] != 0: endpointmonth["time"][monthi] = endpointmonth["time"][monthi]/countendpointmonth[monthi] endpointmonth["speed"][monthi] = endpointmonth["speed"][monthi]/countendpointmonth[monthi] counted_exmonth[exnamei][monthi] = 1 # - # # ## Risultati del Timed Up and Go allmonths = [1,2,3,4,5,6,7,8,9,10,11,12] counted_ex = { "tug" : 0, "abduction_left" : 0, "internal_rotation_left" : 0, "external_rotation_left" : 0, "reaching_left" : 0 } for i in range(len(exercises)): exnamei = exercises[i].name res_exi = exercises[i].result if counted_ex[exnamei] < 1: ############################# # Results for TUG # ############################# if exnamei == "tug": counted_ex[exnamei] = 1 step_month_table = pd.DataFrame.from_dict(stepmonth,orient='index', columns=['Jan','Feb','Mar','Apr','May','Jun', 'Jul','Aug','Sep','Oct','Nov','Dec']) step_month_table.index = ["Numero di passi","Velocitร  [m/s]","Lunghezza passo [m]", "Cadenza [steps/s]","Tempo di esecuzione [s]"] display(step_month_table) for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] if single_metric.name == "ROM": tagjoint = single_metric.tagjoint if np.sum(keyp2rommax_avg[tagjoint]) > 0.0: trace1 = go.Bar( 
x=allmonths, y=keyp2rommax_avg[tagjoint], name='Massimo valore raggiunto', marker=dict( color='rgb(0,0,255)' ) ) trace2 = go.Bar( x=allmonths, y=keyp2rommin_avg[tagjoint], name='Minimo valore raggiunto', marker=dict( color='rgb(255,0,0)' ) ) layout = go.Layout( title='Parametri Range of Motion', font=dict(family='Courier New, monospace', size=18, color='black'), xaxis=dict( title='Month', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), yaxis=dict( title='ROM ' + tagjoint + ' [gradi]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1,trace2] fig = go.Figure(data=data, layout=layout) iplot(fig) # # ## Risultati di Abduzione for i in range(len(exercises)): exnamei = exercises[i].name res_exi = exercises[i].result if counted_ex[exnamei] < 1: ################################### # Results for Abduction # ################################### if "abduction" in exnamei: counted_ex[exnamei] = 1 for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] if single_metric.name == "ROM": tagjoint = single_metric.tagjoint if np.sum(keyp2rommax_avg[tagjoint]) > 0.0: trace1 = go.Bar( x=allmonths, y=keyp2rommax_avg[tagjoint], name='Massimo valore raggiunto', marker=dict( color='rgb(0,0,255)' ) ) trace2 = go.Bar( x=allmonths, y=keyp2rommin_avg[tagjoint], name='Minimo valore raggiunto', marker=dict( color='rgb(255,0,0)' ) ) layout = go.Layout( title='Parametri Range of Motion', font=dict(family='Courier New, monospace', size=18, color='black'), xaxis=dict( title='Month', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, 
color='#7f7f7f' ) ), yaxis=dict( title='ROM ' + tagjoint + ' [gradi]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1,trace2] fig = go.Figure(data=data, layout=layout) iplot(fig) # # ## Risultati di Rotazione Interna for i in range(len(exercises)): exnamei = exercises[i].name res_exi = exercises[i].result if counted_ex[exnamei] < 1: ################################### # Results for Internal # ################################### if "internal_rotation" in exnamei: counted_ex[exnamei] = 1 for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] if single_metric.name == "ROM": tagjoint = single_metric.tagjoint if np.sum(keyp2rommax_avg[tagjoint]) > 0.0: trace1 = go.Bar( x=allmonths, y=keyp2rommax_avg[tagjoint], name='Massimo valore raggiunto', marker=dict( color='rgb(0,0,255)' ) ) trace2 = go.Bar( x=allmonths, y=keyp2rommin_avg[tagjoint], name='Minimo valore raggiunto', marker=dict( color='rgb(255,0,0)' ) ) layout = go.Layout( title='Parametri Range of Motion', font=dict(family='Courier New, monospace', size=18, color='black'), xaxis=dict( title='Month', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), yaxis=dict( title='ROM ' + tagjoint + ' [gradi]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1,trace2] fig = go.Figure(data=data, layout=layout) iplot(fig) # # ## Risultati di Rotazione Esterna for i in range(len(exercises)): exnamei = 
exercises[i].name res_exi = exercises[i].result if counted_ex[exnamei] < 1: ################################### # Results for External # ################################### if "external_rotation" in exnamei: counted_ex[exnamei] = 1 for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] if single_metric.name == "ROM": tagjoint = single_metric.tagjoint if np.sum(keyp2rommax_avg[tagjoint]) > 0.0: trace1 = go.Bar( x=allmonths, y=keyp2rommax_avg[tagjoint], name='Massimo valore raggiunto', marker=dict( color='rgb(0,0,255)' ) ) trace2 = go.Bar( x=allmonths, y=keyp2rommin_avg[tagjoint], name='Minimo valore raggiunto', marker=dict( color='rgb(255,0,0)' ) ) layout = go.Layout( title='Parametri Range of Motion', font=dict(family='Courier New, monospace', size=18, color='black'), xaxis=dict( title='Mese', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), yaxis=dict( title='ROM ' + tagjoint + ' [gradi]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1,trace2] fig = go.Figure(data=data, layout=layout) iplot(fig) # # ## Risultati di Reaching for i in range(len(exercises)): exnamei = exercises[i].name res_exi = exercises[i].result if counted_ex[exnamei] < 1: ################################### # Results for Reaching # ################################### if "reaching" in exnamei: counted_ex[exnamei] = 1 for m in range(len(res_exi)): single_metric,result_single_metric = res_exi[m] if single_metric.name == "EP": trace1 = go.Bar( x=allmonths, y=endpointmonth["time"], name='Time', marker=dict( color='rgb(0,0,255)' ) ) layout = go.Layout( title='Parametri Reaching', font=dict(family='Courier New, monospace', 
size=18, color='black'), xaxis=dict( title='Mese', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), yaxis=dict( title='Tempo di esecuzione [s]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1] fig = go.Figure(data=data, layout=layout) iplot(fig) trace1 = go.Bar( x=allmonths, y=endpointmonth["speed"], name='Speed', marker=dict( color='rgb(0,0,255)' ) ) layout = go.Layout( font=dict(family='Courier New, monospace', size=18, color='black'), xaxis=dict( title='Mese', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), yaxis=dict( title='Velocitร  [m/s]', titlefont=dict( family='Courier New, monospace', size=18, color='#7f7f7f' ), tickfont=dict( family='Courier New, monospace', size=14, color='#7f7f7f' ) ), legend=dict( bgcolor='rgba(255, 255, 255, 0)', bordercolor='rgba(255, 255, 255, 0)' ), barmode='group', bargap=0.1, bargroupgap=0.0 ) data=[trace1] fig = go.Figure(data=data, layout=layout) iplot(fig)
report/report-ita.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Binary Search Tree Reference Implementation

# ### Operations:
#
#
# **insert(val)**
# * Insert val to the tree
# * Val can be none, raise ValueError
#
#
# **is_exist(val)**
# * Return whether val exists in the tree
#
#
# **find(val)**
# * Return the node with value = val
# * Return None if the node doesn't exist
#
#
# **in_order()**
# * Print the tree in in-order traversal
# * If the tree is empty, print nothing
#
#
# **pre_order()**
# * Print the tree in pre-order traversal
# * If the tree is empty, print nothing
#
#
# **post_order()**
# * Print the tree in post-order traversal
# * If the tree is empty, print nothing
#
# **bfs()**
# * Print the tree in BFS traversal
# * If the tree is empty, print nothing

class Node(object):
    """A single BST node: a value plus optional left/right child links."""

    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __str__(self):
        return str(self.data)


import queue


class BST(object):
    """Binary search tree.

    Duplicates (val == node.data) are stored in the left subtree.
    Traversal methods print node values separated by spaces rather than
    returning them, matching the reference-implementation style above.
    """

    def __init__(self):
        self.root = None

    def insert(self, val):
        """Insert ``val`` into the tree.

        Raises:
            ValueError: if ``val`` is None.
        """
        if val is None:
            raise ValueError("value to be inserted cannot be empty!")
        if self.root is None:
            self.root = Node(val)
        else:
            self._insert(self.root, val)

    def _insert(self, root, val):
        # Values <= root.data descend left; strictly larger values descend right.
        if val <= root.data:
            if root.left is None:
                root.left = Node(val)
            else:
                self._insert(root.left, val)
        else:
            if root.right is None:
                root.right = Node(val)
            else:
                self._insert(root.right, val)

    def is_exist(self, val):
        """Return True if ``val`` is stored in the tree, else False.

        Raises:
            ValueError: if ``val`` is None.
        """
        if val is None:
            raise ValueError("Value to be searched cannot be empty!")
        return self._is_exist(self.root, val)

    def _is_exist(self, root, val):
        if root is None:
            return False
        if root.data == val:
            return True
        if val < root.data:
            return self._is_exist(root.left, val)
        else:
            return self._is_exist(root.right, val)

    def find(self, val):
        """Return the Node holding ``val``, or None if it is absent.

        Raises:
            ValueError: if ``val`` is None.
        """
        if val is None:
            raise ValueError("Value to be found cannot be empty!")
        return self._find(self.root, val)

    def _find(self, root, val):
        if root is None:
            return None
        if root.data == val:
            return root
        if val < root.data:
            return self._find(root.left, val)
        else:
            return self._find(root.right, val)

    def in_order(self, root):
        """Print the subtree rooted at ``root`` in in-order (sorted order)."""
        if root is None:
            return
        self.in_order(root=root.left)
        print(root, end=" ")
        self.in_order(root=root.right)

    def pre_order(self, root):
        """Print the subtree rooted at ``root`` in pre-order (node first)."""
        if root is None:
            return
        print(root, end=" ")
        self.pre_order(root=root.left)
        self.pre_order(root=root.right)

    def post_order(self, root):
        """Print the subtree rooted at ``root`` in post-order (node last)."""
        if root is None:
            return
        self.post_order(root=root.left)
        self.post_order(root=root.right)
        print(root, end=" ")

    def bfs(self, root):
        """Print the subtree rooted at ``root`` in level (breadth-first) order."""
        if root is None:
            return
        # BUGFIX: the original used queue.Queue(maxsize=30); q.put() blocks
        # forever once 30 nodes are pending, deadlocking BFS on larger trees.
        # An unbounded queue handles trees of any size.
        q = queue.Queue()
        q.put(root)
        while not q.empty():
            node = q.get()
            print(node.data, end=" ")
            if node.left is not None:
                q.put(node.left)
            if node.right is not None:
                q.put(node.right)


# Demo: build a small tree and exercise every operation.
b = BST()
b.insert(5)
b.insert(1)
b.insert(8)
b.insert(6)
b.insert(9)
b.insert(2)

print("Pre Order:")
b.pre_order(b.root)
print("\nIn Order:")
b.in_order(b.root)
print("\nPost Order:")
b.post_order(b.root)
print("\nBFS:")
b.bfs(b.root)
print("\nDoes 8 exist in the tree? ", b.is_exist(8))
print("Does 100 exist in the tree?", b.is_exist(100))
print(b.find(9))
DataStructures/Binary Search Tree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # Testing the new version of `pbcpy`
#
# write simple tests here, that will be then implemented in the proper unit tests
#

# +
import numpy as np
import sys
# Put the in-tree pbcpy sources ahead of any installed copy on the import path.
if "../src/" not in sys.path:
    sys.path.insert(0,"../src/")
from pbcpy.base import DirectCell, ReciprocalCell, Coord
from pbcpy.grid import DirectGrid, ReciprocalGrid
from pbcpy.field import DirectField, ReciprocalField
import matplotlib.pyplot as plt
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -

# A 10 Angstrom cubic direct-space cell.
at1 = np.identity(3)*10
cell1 = DirectCell(lattice=at1, origin=[0,0,0], units="Angstrom") # 10A cubic cell

cell1.lattice

cell1.volume

# Round trip: direct cell -> reciprocal cell -> direct cell; the print below
# is expected to show True (the round trip recovers the original cell).
rcell = cell1.get_reciprocal()

rcell.lattice

dcell = rcell.get_direct()

print(dcell==cell1)

type(cell1), type(dcell)

dcell.lattice

all(cell1.lattice==at1)

# Same 10A cubic cell, now carrying an 8x8x8 real-space grid.
at1 = np.identity(3)*10
cell1 = DirectGrid(lattice=at1, origin=[0,0,0], nr=[8,8,8]) # 10A cubic cell

cell1.r[0,0,1,:]

reciprocal = cell1.get_reciprocal()

g = reciprocal.g
# |g|^2 at every grid point: elementwise dot product over the last axis.
gg=np.einsum("ijkl,ijkl->ijk",g,g)

# Spot check against the expected value 0.125**2 (grid spacing 1/8 per axis
# for this 10A cell -- TODO confirm the units convention in pbcpy).
gg[0,0,1], g[0,0,1,:], 0.125**2

# Load a Kohn-Sham density from a Quantum ESPRESSO .pp file (test fixture).
from pbcpy.formats.qepp import PP
water_dimer = PP(filepp="../tests/density_ks.pp").read()

grad_rho = water_dimer.field.gradient()

rho_g = water_dimer.field.fft()

rho_g.grid.g[...,2].shape

(rho_g.grid.g[...,1]*rho_g[...,0]).shape

grad_rho[0,0,0,:]

np.einsum("ijkl,ijkl->ijk", grad_rho, grad_rho)

# NOTE(review): sigma() presumably returns |grad rho|^2 (cf. the manual
# einsum of gradrho with itself below) -- confirm against pbcpy docs.
water_dimer.field.sigma()

# Build a synthetic density rho varying as sin(m * 2*pi * i/n) along the
# first axis of an n^3 grid over a (2*pi)^3 cell, constant over the others.
n = 180
nr = n,n,n
m = 1
griddata_3d = np.zeros(nr,dtype=float)
for i in range(n):
    theta = m*2*np.pi*i/n
    val = np.sin(theta)
    griddata_3d[i,:,:] = val
grid = DirectGrid(lattice=np.identity(3)*2*np.pi,nr=(n,n,n),)
rho = DirectField(grid=grid, griddata_3d=griddata_3d)
gradrho = rho.gradient()
sigmarho = rho.sigma()

gradrho[:,0,0,:]

# Overlay rho, its (rescaled) gradient components, and sigma along axis 0.
plt.plot(rho[:,0,0,0])
plt.plot((n/m)*gradrho[:,0,0,0])
plt.plot((n/m)*gradrho[:,0,0,1])
plt.plot((n/m)*gradrho[:,0,0,2])
plt.plot((n/m)**2*sigmarho[:,0,0])

# Manual sigma for comparison with the sigma() method above.
sigma = np.einsum("ijkl,ijkl->ijk", gradrho.real, gradrho.real)

sigma.shape

plt.plot(2500*sigma[:,0,0])

# Minimal einsum sanity check on a plain ndarray.
A = np.ones((10,5,3))

A

np.einsum("ijk,ijk->ij", A,2*A)
notebooks/pbcpy2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="_kEhsakh8OeU" # <a href="https://colab.research.google.com/github/davemlz/eemont/blob/master/tutorials/004-Computing-Spectral-Indices-Landsat-8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jZEthLln92Ep" # # Computing Spectral Indices in Landsat 8 # + [markdown] id="dNa470OZ8Oec" # - GitHub Repo: [https://github.com/davemlz/eemont](https://github.com/davemlz/eemont) # - PyPI link: [https://pypi.org/project/eemont/](https://pypi.org/project/eemont/) # - Documentation: [https://eemont.readthedocs.io/en/latest/index.html](https://eemont.readthedocs.io/en/latest/index.html) # + [markdown] id="CD7h0hbi92Er" # ## Let's start! # + [markdown] id="E0rc6Cya92Es" # If required, please uncomment: # + id="NYzyvKtk92Es" # #!pip install eemont # #!pip install geemap # + [markdown] id="x3Rm3qt_92Et" # Import the required packges. # + id="H0C9S_Hh92Et" import ee, eemont, datetime, geemap # + [markdown] id="k1sdX2p592Eu" # Authenticate and Initialize Earth Engine and geemap. # + id="7QDXqVwy8Oef" Map = geemap.Map() # + [markdown] id="FYguKZh892Ey" # Point of interest. # + id="R57SR6Xl8Oeg" point = ee.Geometry.Point([-74.0592,11.3172]) # + [markdown] id="qwevPFML8Oeg" # Get, filter, mask clouds and scale the image collection. # + id="sBmM9kZn92Ez" L8 = (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR') .filterBounds(point) .sort('CLOUD_COVER') .first() .maskClouds() .scale()) # + [markdown] id="RyTMix-B8Oeh" # ## Spectral Indices # # Spectral indices can be computed for Sentinel-2 and Landsat Products using the `index()` method. # + id="EYpINsAq8Oeh" L8 = L8.index() # + [markdown] id="xcfhDCrs8Oei" # By default, the NDVI is computed and added as a new band. 
# + colab={"base_uri": "https://localhost:8080/"} id="XFn1GOPf8Oei" outputId="bebc3aa7-c04a-4b11-d2c7-1c8b1b66aee5" L8.bandNames().getInfo() # + [markdown] id="-V-83HGx8Oej" # If required, another index can be computed: # + id="7qGKatbu8Oek" L8 = L8.index('BAI') # + [markdown] id="_Wgph3Rg8Oek" # This new index is now added as another new band. # + colab={"base_uri": "https://localhost:8080/"} id="uRwIgdhh8Oek" outputId="4c7c5281-12e2-4058-a2a7-12af978b09ce" L8.bandNames().getInfo() # + [markdown] id="vb7wYuec8Oel" # Multiple indices can also be computed. # + id="CLZNOi7x8Oel" L8 = L8.index(['EVI','GNDVI','RVI']) # + [markdown] id="nL2GQofP8Oel" # These indices are now added as new bands: # + colab={"base_uri": "https://localhost:8080/"} id="UfNL9i788Oel" outputId="afc273ef-2a03-4c7f-a180-14ba79ef93e7" L8.bandNames().getInfo() # + [markdown] id="rUTk7FTf8Oeq" # Group of indices can also be added by the group name. The available options are: vegetation, water, burn, snow or all (compute all spectral indices): # + id="imXL5B9x8Oeq" L8 = L8.index('water') # Computes NDWI and MNDWI # + [markdown] id="ovRsdFUL8Oeq" # The computed indices are now added as new bands: # + colab={"base_uri": "https://localhost:8080/"} id="9tu_egEX8Oer" outputId="de6d662b-c418-4c12-ec85-1ec8da0bb137" L8.bandNames().getInfo() # + [markdown] id="JZ-8JCCl8Oer" # ## Visualization # + [markdown] id="82LrL24j8Oer" # Now, let's plot two of the computed indices: GNDVI and NDWI. 
# + [markdown] id="h3OCUhiZ8Oes" # Let's define the visualization parameters for the GNDVI: # + id="IMRwDLXS8Oes" visParamsGNDVI = { 'min': 0.0, 'max': 1.0, 'palette': [ 'FFFFFF', 'CE7E45', 'DF923D', 'F1B555', 'FCD163', '99B718', '74A901', '66A000', '529400', '3E8601', '207401', '056201', '004C00', '023B01', '012E01', '011D01', '011301' ], } # + [markdown] id="jf2Hmx_j8Oes" # Let's define the visualization parameters for the NDWI: # + id="leMK1WUh8Oes" visParamsNDWI = { 'min': 0.0, 'max': 1.0, 'palette': ['0000ff', '00ffff', 'ffff00', 'ff0000', 'ffffff'], } # + [markdown] id="QN_NYqWo8Oet" # Use `geemap` to display results: # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_-zl_7I78Oet" outputId="95ce28cf-6cd1-4ad1-fc2d-ea95282e303c" Map.centerObject(point,9) Map.addLayer(L8.select('GNDVI'),visParamsGNDVI,'GNDVI') Map.addLayer(L8.select('NDWI'),visParamsNDWI,'NDWI') Map.add_colorbar(visParamsGNDVI['palette'], caption = 'GNDVI') Map.add_colorbar(visParamsNDWI['palette'], caption = 'NDWI') Map
tutorials/004-Computing-Spectral-Indices-Landsat-8.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.5.3
#     language: julia
#     name: julia-1.5
# ---

# # Cálculo Numérico - 2021.2 - Tarefa 4 - Professor <NAME>

# ## Realizado por: <NAME> - 120047390

# ## Questões discutidas com: <NAME>, <NAME>, <NAME>

# # Exercício 1.1.2

# Para este exercício, utilizaremos a técnica do Método de Newton para calcular uma aproximação de ln(3) utilizando 20 passos do método.
#
# Para isto, queremos uma função $f(x)$ tal que $f(ln(3)) = 0$. Como sabemos calcular $e^{x}$ para qualquer $x$ e sabemos as propriedades de $ln$, podemos utilizar a função $f(x) = 3 - e^{x}$.
#
# Com isto, temos que
#
# $$f(x) = 3 - e^{x}$$
# $$f'(x) = -e^x$$
#
# Assim, podemos utilizar o Método de Newton na forma
#
# $$x = x_{init} - \dfrac{f(x_{init})}{f'(x_{init})} = x_{init} - \dfrac{3-e^{x_{init}}}{-e^{x_{init}}}$$
#
# tal que $x$ é o número que queremos aproximar e $x_{init}$ é um chute inicial.
#
# Com isto, podemos utilizar o algoritmo abaixo para realizar a aproximação.

# Newton's method to approximate ln(3): iterate x <- x - f(x)/f'(x) with
# f(x) = 3 - e^x, whose root is exactly ln(3).
#   iteracoes : number of Newton steps (more steps -> more precision)
#   chute     : initial guess
function newton_ln_3(iteracoes, chute)
    for i = 1:iteracoes
        # exp(chute) is the idiomatic (and exactly equivalent) form of
        # Base.MathConstants.e^chute; the step is chute - f(chute)/f'(chute).
        chute = chute - (3 - exp(chute)) / (-exp(chute))
    end
    return chute # approximation of ln(3)
end

# Passaremos então para função um número de passos $20$ e uma aproximação inicial $1$

newton_ln_3(20, 1)

# # Exercício 1.1.4
#
#
# Para este exercício, utilizaremos a técnica da Interpolação Polinomial de Grau 1 para calcular uma aproximação de ln(3) e estimaremos o erro máximo.
Utilizaremos entรฃo um conjunto de pontos afim de montar o sistema de equaรงรตes que resultarรก na matriz de Vandermonde para encontrarmos uma funรงรฃo aproximada que nos darรก o valor de ln(3). # # Podemos tirar uma aproximaรงรฃo da funรงรฃo logaritmica prรณxima de um polinรดmio de grau 1 utilizando valores triviais, isto รฉ, $ln(1)$ e $ln(e)$, pois podem ser calculados usando apenas as propriedades de logaritmo. Com isto, teremos uma funรงรฃo que passa pelos pontos $(x_{1},y_{1}),(x_{2},y_{2})$ e que nos darรก um polinรดmio na forma # $$P = c_{1}x + c_{0}$$ # # $$ # \begin{cases} # c_{1}x_{0} + c_{0} = y_{0} \\ # c_{1}x_{1} + c_{0} = y_{1} \\ # \end{cases} # $$ # # Usaremos entรฃo os pontos $(ln(1), 0),(ln(e),1)$. Portanto, temos que # # \begin{cases} # c_{1}ln(1) + c_{0} = 0 \\ # c_{1}ln(e) + c_{0} = 1 \\ # \end{cases} # # Realizando as conversรตes necessรกrias para exp, temos que, com isto, podemos modelar as matrizes da seguinte forma # # $$ # V = \begin{bmatrix} # 1 & 1 \\ # 1 & exp(1) \\ # \end{bmatrix} # $$ # # $$ # y = \begin{bmatrix} # 0 \\ # 1 \\ # \end{bmatrix} # $$ # # $$ # C = \begin{bmatrix} # c_{0} \\ # c_{1} \\ # \end{bmatrix} # $$ # # Devemos entรฃo resolver o sistema tal que $V \cdot c = y$. Vamos entรฃo utilizar o mรฉtodo interpolacao_ln3_grau1 descrito abaixo para resolver o sistema, retornando os coeficientes do polinรดmio de grau 1. function interpolacao_ln3_grau1() # Cria a matriz V x = [1; exp(1)] y = [0; 1] V=[x.^0 x.^1] c=V\y # Resolve o sistema linear Vc=y return c #vetor de coeficientes end # Com isto, podemos utilizar o mรฉtodo para achar os coeficientes da polinรดmio de grau 1 que nos dรก uma aproximaรงรฃo da funรงรฃo original. Podemos entรฃo armazenar esses coeficientes, montar a funรงรฃo afim e calcular uma aproximaรงรฃo de ln(3) a partir desta funรงรฃo. 
coefs = interpolacao_ln3_grau1() c1 = coefs[2] c0 = coefs[1] f(x) = c1*x + c0 f(3) # Com isto, podemos agora tambรฉm estipular o erro mรกximo na interpolaรงรฃo de grau 1, tal que este serรก dado por # # $$\dfrac{f''(\epsilon)}{2!}(x - x_{0})(x - x_{1}) = f(\alpha) - p(\alpha) \leq \dfrac{M}{(2!)}*(x - x_{0})(x - x_{1})$$ # # para todo $\epsilon$ entre $x_{0} = 0$ e $x_{1} = 1$ # # Exercรญcio 1.1.5 # # # Para este exercรญcio, utilizaremos a tรฉcnica da Interpolaรงรฃo Polinomial de Grau 2 para calcular uma aproximaรงรฃo de ln(3) e estimaremos o erro mรกximo. Utilizaremos entรฃo um conjunto de pontos afim de montar o sistema de equaรงรตes que resultarรก na matriz de Vandermonde para encontrarmos uma funรงรฃo aproximada que nos darรก o valor de ln(3). # # Podemos tirar uma aproximaรงรฃo da funรงรฃo logaritmica prรณxima de um polinรดmio de grau 2 utilizando valores triviais, isto รฉ, $ln(1)$, $ln(e)$ e $ln(e^{2})$ pois podem ser calculados usando apenas as propriedades de logaritmo. Com isto, teremos uma funรงรฃo que passa pelos pontos $(x_{1},y_{1}),(x_{2},y_{2}), (x_{3},y_{3})$ e que nos darรก um polinรดmio na forma # # $$P = c_{2}x^2 + c_{1}x + c_{0}$$ # # $$ # \begin{cases} # c_{2}x_{0}^2 + c_{1}x_{0} + c_{0} = y_{0} \\ # c_{2}x_{1}^2 + c_{1}x_{1} + c_{0} = y_{1} \\ # c_{2}x_{2}^2 + c_{1}x_{2} + c_{0} = y_{2} \\ # \end{cases} # $$ # # Usaremos entรฃo os pontos $(ln(1), 0),(ln(e),1)$ e $(ln(e^2), 2)$. 
Portanto, temos que # # \begin{cases} # c_{2}ln(1)^2 + c_{1}ln(1) + c_{0} = 0 \\ # c_{2}ln(e)^2 + c_{1}ln(e) + c_{0} = 1 \\ # c_{2}ln(e^2)^2 + c_{1}ln(e^2) + c_{0} = 2 \\ # \end{cases} # # Realizando as conversรตes necessรกrias para exp, temos que, com isto, podemos modelar as matrizes da seguinte forma # # $$ # V = \begin{bmatrix} # 1 & 1 & 1^2 \\ # 1 & exp(1) & exp(1)^2 \\ # 1 & exp(2) & exp(2)^2 \\ # \end{bmatrix} # $$ # # $$ # y = \begin{bmatrix} # 0 \\ # 1 \\ # 2 \\ # \end{bmatrix} # $$ # # $$ # C = \begin{bmatrix} # c_{0} \\ # c_{1} \\ # c_{2} \\ # \end{bmatrix} # $$ # # Devemos entรฃo resolver o sistema tal que $V \cdot c = y$. Vamos entรฃo utilizar o mรฉtodo interpolacao_ln3_grau2 descrito abaixo para resolver o sistema, retornando os coeficientes do polinรดmio de grau 2. function interpolacao_ln3_grau2() # Cria a matriz V x = [1; exp(1); exp(2)] y = [0; 1; 2] V=[x.^0 x.^1 x.^2] c=V\y # Resolve o sistema linear Vc=y return c #vetor de coeficientes end # Com isto, podemos utilizar o mรฉtodo para achar os coeficientes da polinรดmio de grau 2 que nos dรก uma aproximaรงรฃo da funรงรฃo original. Podemos entรฃo armazenar esses coeficientes, montar a funรงรฃo quadrรกtica e calcular uma aproximaรงรฃo de ln(3) a partir desta funรงรฃo. coefs = interpolacao_ln3_grau2() c2 = coefs[3] c1 = coefs[2] c0 = coefs[1] f(x) = c2*x^2 + c1*x + c0 f(3) # Com isto, podemos agora tambรฉm estipular o erro mรกximo na interpolaรงรฃo de grau 2, tal que este serรก dado por # # $$\dfrac{f'''(\epsilon)}{3!}(x - x_{0})(x - x_{1})(x - x_{2})$$ # # para todo $\epsilon$ entre $x_{0}$ e $x_{2}$ # # Exercรญcio 1.2) # Para este exercรญcio, iremos gerar aleatoriamente 30 pontos em um polinรดmio de grau 5. Apรณs isto, faremos a regressรฃo polinomial com polinรดmios de grau 0 atรฉ 29. Faremos entรฃo a anรกlise do caso e para polinรดmios de grau maior que 29. No final de tudo, plotaremos o erro total por grau. 
#
#

# +
using Plots
using Random
using LinearAlgebra

Random.seed!(0)

# Generate synthetic test data: 30 noisy samples of (x-1)^5 on [0, 2]
n = 30
x = range(0, 2, length=n)
f(x) = (x-1)^5
y = f.(x) + randn(n)*0.05 # noise

scatter(x, y, c=:lightblue, ms=3, leg=false)
# -

# Após, podemos utilizar a função vandermon abaixo para montar a matriz de Vandermon necessária para realizar o cálculo a depender do grau da função.
#

# Build the (n x grau+1) Vandermonde matrix V with V[i, j] = x[i]^(j-1).
# `y` is used only for its length (the number of sample points).
function vandermonde(x,y,grau)
    n,=size(y)
    V=zeros(n,grau+1)
    for i=1:n # rows (one per sample point)
        for j=1:(grau+1) # columns (one per monomial power)
            V[i,j]=x[i]^(j-1)
        end
    end
    return V
end

# Com isto, podemos aplicar a regressão para resolver o sistema e nos dar uma aproximação da função variando do grau $0$ até $29$.

# Polynomial least-squares fit of degree `grau`; returns the coefficient
# vector (constant term first) together with the degree.
function regressao(x,y,grau)
    V=vandermonde(x,y,grau)
    c=V\y # least squares via backslash
    return c, grau
end

# Criaremos também a função plota_regressao para plotar todas as aproxima

using Polynomials
using Plots

# Wrap the first `grau+1` fitted coefficients in a Polynomials.Polynomial so
# the fit can be evaluated and plotted. The original version rebuilt the
# Polynomial once per coefficient inside a loop and kept only the last one;
# constructing it a single time yields the identical polynomial without the
# redundant intermediate objects.
function plota_regressao(regressao, grau)
    coefs = Float64.(regressao[1:grau+1])
    return Polynomial(coefs)
end

# Com isto, podemos plotar uma aproximação da função em qualquer grau.

# +
c, grau = regressao(x,y,0)
p0 = plota_regressao(c, 0)
c, grau = regressao(x,y,1)
p1 = plota_regressao(c, 1)
c, grau = regressao(x,y,2)
p2 = plota_regressao(c, 2)
c, grau = regressao(x,y,3)
p3 = plota_regressao(c, 3)

scatter(x, y, c=:lightblue, ms=3, leg=false)
plot!(p0, 0, 2)
plot!(p1, 0, 2)
plot!(p2, 0, 2)
plot!(p3, 0, 2)
# -

# Com isto, podemos fazer o experimento de fazer uma regressão de grau 29 e maior que 29.

# +
c, grau = regressao(x,y,29)
p29 = plota_regressao(c, 29)

scatter(x, y, c=:lightblue, ms=3, leg=false)
println(p29)
plot!(p29, 0, 2)
# -

# No primeiro caso, para a interpolação de grau 29, deveríamos observar uma função passando exatamente por todos os pontos. Porém, por problemas de precisão por ser um polinômio muito grande, podemos observar distorções no gráfico conforme nos aproximamos de 29.
# + c, grau = regressao(x,y,35) p35 = plota_regressao(c, 35) scatter(x, y, c=:lightblue, ms=3, leg=false) plot!(p35, 0, 2) # - # Para graus maiores que 29, podemos continuar realizando a interpolaรงรฃo, apesar das distorรงรตes. Neste caso, porรฉm, podemos observar que comeรงaremos a ter valores dos pontos iniciais que nรฃo estรฃo presentes com exatidรฃo na funรงรฃo. Isto รฉ, quando temos grau 29, todas as equaรงรตes sรฃo satisfeitas. Apรณs isto, comeรงaremos a ter equaรงรตes que deixarรฃo de ser satisfeitas, voltando a aumentar nosso erro na funรงรฃo. # Podemos entรฃo, por fim, utilizar a funรงรฃo erro_total e erro_modelos para plotar o grรกfico do erro total da funรงรฃo por grau println(p29(1)) function erro_total(x,y,modelo) n,=size(y) S=0 for i=1:n S=S+(y[i]-modelo(x[i]))^2 end return sqrt(S) end function erro_modelos(grau, modelos) erros = Any[] for i=0:grau push!(erros, erro_total(x,y,modelos[i+1])) end return erros end # + modelos = Any[] n = 30 x = range(0, 2, length=n) f(x) = (x-1)^5 y = f.(x) + randn(n)*0.05 #ruรญdo for i=0:29 c, grau = regressao(x,y,i) pn = plota_regressao(c, i) push!(modelos, pn) end y = erro_modelos(29, modelos) x = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29] plot(x, y) # - # # Exercรญcio 1.3) # Para este exercรญcio, devemos ser capazes de, utilizando as tรฉcnicas de minรญmos quadrados apresentadas em aula, classificar o filme preferido da turma na pesquisa realizada. Essa pesquisa foi classificada colocando-se a escolha dos filmes uns contra outros. 
Nesta pesquisa, foi obtido o seguinte resultado: # # #### Toy story 12 x 1 Rocky # #### De Volta Pro Futuro 8 x 5 Curtindo a Vida Adoidado # #### Os Incrรญveis 10 x 3 Duna # #### Batman Begins 7 x 5 Harry Potter 1 # #### Shrek 11 x 2 Duna # #### <NAME>ter 10 x 3 Rocky # #### Toy Story 9 x 4 De Volta Para o Futuro # #### Os Incrรญveis 9 x 4 Harry potter 1 # #### Curtindo a Vida Adoidado 7 x 5 Duna # #### De Volta Para o Futuro 7 x 5 Duna # #### Shrek 12 x 1 Rocky # #### Os Incrรญveis 9 x 4 Batman Begins # #### Toy Story 8 x 5 Batman Begins # #### Os Incrรญveis 10 x 3 Curtindo a vida adoidado # # Com isto, podemos mapear a relaรงรฃo entre cada um dos filmes. Para nos auxiliarmos, utilizaremos uma estrutura em grafo para melhor visualizaรงรฃo. # # ![alt text](filmes.png "Filmes") # # ##### Toy Story = A # ##### Rocky = B # ##### De Volta Pro Futuro = C # ##### Curtindo a Vida Adoidado = D # ##### Os Incrรญveis = E # ##### Duna = F # ##### Batman Begins = G # ##### <NAME> e a <NAME> = H # ##### Shrek = I # # Com isto, analisando o grafo, temos que as seguintes equaรงรตes relacionandos os filmes # # $$ # \begin{cases} # H - B = 7 \\ # A - B = 11 \\ # I - B = 11 \\ # I - F = 9 \\ # C - F = 2 \\ # A - C = 5 \\ # A - G = 3 \\ # E - G = 5 \\ # E - F = 7 \\ # D - F = 2 \\ # C - D = 3 \\ # E - D = 7 \\ # E - H = 5 \\ # G - H = 2 \\ # \end{cases} # $$ # # Assim, podemos montar as matrizes caracterรญsticas do sistema, tal que # # $$ # V = \begin{bmatrix} # 0 & -1 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ # 1 & -1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ # 0 & -1 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ # 0 & 0 & 0 & 0 & 0 & -1 & 0 & 0 & 1 \\ # 0 & 0 & 1 & 0 & 0 & -1 & 0 & 0 & 0 \\ # 1 & 0 & -1 & 0 & 0 & 0 & 0 & 0 & 0 \\ # 1 & 0 & 0 & 0 & 0 & 0 & -1 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 1 & 0 & -1 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 1 & -1 & 0 & 0 & 0 \\ # 0 & 0 & 0 & 1 & 0 & -1 & 0 & 0 & 0 \\ # 0 & 0 & 1 & -1 & 0 & 0 & 0 & 0 & 0 \\ # 0 & 0 & 0 & -1 & 1 & 0 & 0 & 0 & 0 \\ # 0 & 0 & 0 & 0 & 1 & 0 & 0 & -1 & 0 \\ # 0 & 0 & 0 & 
0 & 0 & 0 & 1 & -1 & 0 \\ # \end{bmatrix} # $$ # # $$ # y = \begin{bmatrix} # A \\ # B \\ # C \\ # D \\ # E \\ # F \\ # G \\ # H \\ # I \\ # \end{bmatrix} # $$ # # $$ # C = \begin{bmatrix} # 7 \\ # 11 \\ # 11 \\ # 9 \\ # 2 \\ # 5 \\ # 3 \\ # 5 \\ # 7 \\ # 2 \\ # 3 \\ # 7 \\ # 5 \\ # 2 \\ # \end{bmatrix} # $$ # Com o sistema em mรฃos, podemos entรฃo utilizar a funรงรฃo calcular_filme_preferido() para resolver o sistema e calcular a pontuaรงรฃo de cada filme na pesquisa obtida function calcular_filme_preferido() A = [0 -1 0 0 0 0 0 1 0; 1 -1 0 0 0 0 0 0 0; 0 -1 0 0 0 0 0 0 1; 0 0 0 0 0 -1 0 0 1; 0 0 1 0 0 -1 0 0 0; 1 0 -1 0 0 0 0 0 0; 1 0 0 0 0 0 -1 0 0; 0 0 0 0 1 0 -1 0 0; 0 0 0 0 1 -1 0 0 0; 0 0 0 1 0 -1 0 0 0; 0 0 1 -1 0 0 0 0 0; 0 0 0 -1 1 0 0 0 0; 0 0 0 0 1 0 0 -1 0; 0 0 0 0 0 0 1 -1 0] b = [7;11;11;9;2;5;3;5;7;2;3;7;5;2] x=A\b return x end calcular_filme_preferido() # Com isto, concluรญmos que o filme I ( Shrek ) foi o filme preferido na pesquisa feita com os alunos. # # Exercรญcio 1.4) # # Neste exercรญcio, devemos, dada um conjunto de pesos representando a pesagem no dia medido, estipular uma aproximaรงรฃo pro dia que serรก pesado na contagem o valor $110$. 
Temos que foram medidas as seguintes pesagens nos dias # # | Dia | Peso | # | --- | ---- | # 26/10 | 120,6 # 27/10 | 121,6 # 28/10 | 120,8 # 29/10 | 121,4 # 30/10 | 121,1 # 5/11 | 121,1 # 6/11 | 120,4 # 7/11 | 120,3 # 8/11 | 120,8 # 9/11 | 120,6 # 10/11 | 119,6 # 11/11 | 119,8 # 12/11 | 118,7 # 13/11 | 120,5 # 14/11 | 120,1 # 15/11 | 120,2 # 16/11 | 120,7 # 17/11 | 121,7 # 18/11 | 120,7 # 19/11 | 120,7 # 20/11 | 120,3 # 21/11 | 119,4 # 22/11 | 119,1 # 23/11 | 120,2 # 24/11 | 120,7 # 25/11 | 120,1 # 26/11 | 119,7 # 27/11 | 119,2 # 28/11 | 119,4 # 29/11 | 119,5 # 30/11 | 119 # 1/12 | 118,9 # 2/12 | 118,7 # 3/12 | 118,3 # 4/12 | 118,5 # 5/12 | 118,6 # 6/12 | 118,8 # 7/12 | 118,5 # 8/12 | 118,3 # 9/12 | 117,8 # 10/12 | 118 # 13/12 | 119 # 15/12 | 118,4 # 17/12 | 116,9 # 18/12 | 117,5 # 19/12 | 117,4 # 20/12 | 117,6 # 21/12 | 118,1 # 22/12 | 117,3 # 23/12 | 117,6 # 24/12 | 117,7 # 25/12 | 117,6 # 26/12 | 117,3 # 27/12 | 118 # 28/12 | 117,8 # 29/12 | 117,5 # 4/1 | 119 # 5/1 | 117,6 # 6/1 | 116,8 # 7/1 | 116,6 # 8/1 | 116,9 # 10/1 | 116,1 # 11/1 | 116,1 # 12/1 | 115,8 # 13/1 | 115,6 # 14/1 | 116 # 15/1 | 115,4 # 16/1 | 115,5 # 17/1 | 115,3 # # Utilizando o trecho de cรณdigo abaixo, podemos realizar a plotagem dos pontos dessa pesagem n = 69 x = [1,2,3,4,5,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,49,51,53,54,55,56,57,58,59,60,61,62,63,64,65,71,72,73,74,75,77,78,79,80,81,82,83,84] y = [120.6,121.6,120.8,121.4,121.1,121.1,120.4,120.3,120.8,120.6,119.6,119.8,118.7,120.5,120.1,120.2,120.7,121.7,120.7,120.7,120.3,119.4,119.1,120.2,120.7,120.1,119.7,119.2,119.4,119.5,119,118.9,118.7,118.3,118.5,118.6,118.8,118.5,118.3,117.8,118,119,118.4,116.9,117.5,117.4,117.6,118.1,117.3,117.6,117.7,117.6,117.3,118,117.8,117.5,119,117.6,116.8,116.6,116.9,116.1,116.1,115.8,115.6,116,115.4,115.5,115.3] scatter(x, y, c=:lightblue, ms=3, leg=false) # Podemos entรฃo utilizar as funรงรตes jรก produzidas vandermonde, regressao e 
plota_regressรฃo para realizar resolver o sistema proposto. Abaixo, serรก realizado o cรกlculo da regressรฃo atรฉ o grau 6 e, em seguida, calculado o erro das regressรตes para que possamos escolher algum dos casos. # + c, grau = regressao(x,y,0) p0 = plota_regressao(c, 0) c, grau = regressao(x,y,1) p1 = plota_regressao(c, 1) c, grau = regressao(x,y,2) p2 = plota_regressao(c, 2) c, grau = regressao(x,y,3) p3 = plota_regressao(c, 3) c, grau = regressao(x,y,4) p4 = plota_regressao(c, 4) c, grau = regressao(x,y,5) p5 = plota_regressao(c, 5) c, grau = regressao(x,y,6) p6 = plota_regressao(c, 6) scatter(x, y, c=:lightblue, ms=3, leg=false) plot!(p0, 0, 90) plot!(p1, 0, 90) plot!(p2, 0, 90) plot!(p3, 0, 90) plot!(p4, 0, 90) plot!(p5, 0, 90) plot!(p6, 0, 90) # - println(erro_total(x,y,p0)) println(erro_total(x,y,p1)) println(erro_total(x,y,p2)) println(erro_total(x,y,p3)) println(erro_total(x,y,p4)) println(erro_total(x,y,p5)) println(erro_total(x,y,p6)) # Como, a partir de $p1$, comeรงou-se a ter uma perda mรญnima no erro, utilizaremos este para nos dar uma aproximaรงรฃo. Assim, querermos achar um $x$ tal que $f(x) = 110$. for i=160:200 println("p1(",i,"): ",p1(i)) end # Assim, temos, aproximadamente, que o peso serรก de 110kg serรก em $x = 173$, ou seja, em 16/04. # # Exercรญcio 1.5) # # Para este exercรญcio, devemos descobrir o horรกrio ou uma interpolaรงรฃo do horรกrio em que houve um assassinato. De acordo com a descriรงรฃo, as temperaturas registradas foram # # #### 15h00: 34ยบC # #### 16h30: 30ยบC # #### 17h30: 25ยบC # # Seguindo a Lei do Resfriamento de Newton, teremos que a temperatura do corpo varia conforme: # # $F(x) = ( Ti - Tf) e^{-kx} + Tf$ # # sendo a temperatura inicial $Ti = 37$, a temperatura final $Tf = 25$. Com isto, teremos que resolver o sistema nรฃo-linear para encontrar o coeficiente $k$ e determinarmos uma funรงรฃo que encontre a hora do assassinato. 
#
# Podemos então utilizar a função calcula_assassinato em conjunto com as funções regressão e vandermonde para resolver o sistema não-linear fazendo a transformação do sistema utilizando $log$ e nos devolvendo os coeficientes da função.

# Linearise the exponential cooling model by taking the log of the measured
# temperatures and fitting a degree-1 polynomial: log(y) ≈ c0 + c1*x.
# Returns the (coefficients, degree) pair produced by `regressao`.
function calcula_assassinato(x, y)
    x_barra = x
    y_barra = log.(y)
    # `regressao` already builds the Vandermonde matrix internally, so the
    # explicit `vandermonde` call in the original version was unused work.
    coefs = regressao(x_barra, y_barra, 1)
    return coefs
end

# Com isto, podemos então utilizar os pontos de tempo x temperatura medidos para montarmos o sistema e procurar os coeficientes que representam a função esperada.

# +
x = [15,16.5,17.5]
y = [34,30,25]

coefs, _ = calcula_assassinato(x,y)
c1=exp(coefs[1])
c2=coefs[2]

# Newton's law of cooling would read T(t) = (Ti - Tf)*exp(-k*t) + Tf; here we
# work directly with the fitted exponential model below. (The original cell
# also defined `f(x) = (37 - 25)*e^(-kx) + 25`, which referenced the
# undefined names `e` and `kx` and was never called — removed.)
assassinato(x)=c1*exp(c2*x) # exponential model

scatter(x, y, c=:lightblue, ms=3, leg=false)
plot!(assassinato,10,20)
# -

# Com a função que representa o assassinato, podemos então utilizar o método de Newton para nos auxiliar a encontrar o valor esperado (37°C) na função. Com isto, podemos utilizar a função $f(x) = c1*e^{c2*x} - 37$ e sua derivada.

# Newton iteration to solve assassinato(x) = 37, i.e. the time at which the
# body was still at 37 °C. Reads the globals c1, c2 fitted above.
#   iteracoes : number of Newton steps
#   chute     : initial guess for the time
function newton_assassinato(iteracoes, chute)
    for i=1:iteracoes
        # chute - f(chute)/f'(chute) with f(x) = c1*exp(c2*x) - 37
        chute = chute - (c1*(exp(c2*chute)) - 37)/(((c2*c1)*exp((c2*chute))))
    end
    return chute # approximation; more iterations -> more precision
end

newton_assassinato(30, 14)

# Com isto, temos que o horário do assassinato corresponde a $14.424843136955248$, ou seja, aproximadamente às $14h25$.
Tarefa4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import networkx as nx import pandas as pd import itertools import json # + pycharm={"name": "#%%\n"} raw_data = pd.read_json('results/mutual_links.json') # + pycharm={"name": "#%%\n"} g = nx.Graph() g.add_nodes_from(raw_data.dbpedia) g.add_edges_from(( (r['dbpedia'], target) for _, r in raw_data.iterrows() for target in ([] if pd.isna(r['others']) else r['others'].split(' || ')) )) # + pycharm={"name": "#%%\n"} def get_unconnected(graph): ccs = list(nx.connected_components(graph)) not_connected = [] for cc in ccs: if len(cc) > 10: continue not_connected.extend(cc) print(f'Nodes outside large connected components: {len(not_connected)}') return not_connected suspects_unconnected = get_unconnected(g) # + [markdown] pycharm={"name": "#%% md\n"} # Remove suspects from previous step. Seek out communities in the connected graph. 
# + pycharm={"name": "#%%\n"}
# Drop the small-component suspects found in the previous step.
g.remove_nodes_from(suspects_unconnected)

# + pycharm={"name": "#%%\n"}
# Removing bridge edges may split off further small components; collect those
# nodes as a second round of suspects and drop them as well.
g_bridges = list(nx.bridges(g))
g.remove_edges_from(g_bridges)
suspects_unconnected2 = get_unconnected(g)
g.remove_nodes_from(suspects_unconnected2)

# + pycharm={"name": "#%%\n"}
# Label propagation on the remaining graph: members of small communities
# (size <= 10) form the third group of suspects.
communities = list(nx.community.label_propagation_communities(g))
suspects_community = []
for community in communities:
    if len(community) > 10:
        continue
    suspects_community.extend(community)

# + pycharm={"name": "#%%\n"}
# One row per suspect, tagged with the reason it was flagged.
results = pd.DataFrame(
    columns=['dbpedia', 'reason'],
    data=itertools.chain(
        ((uri, 'not connected') for uri in suspects_unconnected),
        ((uri, 'not connected after bridge removal') for uri in suspects_unconnected2),
        ((uri, 'community') for uri in suspects_community)
    )
)
results.groupby('reason').count()

# + pycharm={"name": "#%%\n"}
raw_data = raw_data.set_index('dbpedia')

# + pycharm={"name": "#%%\n"}
# Attach the CSO topics of each suspect; the link metadata columns are not
# needed in the final report.
results = results.join(raw_data, on='dbpedia').drop(columns=['linkCount', 'others'])

# + pycharm={"name": "#%%\n"}
# Serialise the suspects, expanding the whitespace-separated topic ids into
# full CSO topic URIs.
results_arr = []
for _, r in results.iterrows():
    results_arr.append({
        'dbpedia': r['dbpedia'],
        'reason': r['reason'],
        'cso_topics': ['https://cso.kmi.open.ac.uk/topics/' + y for y in r['cso_topics'].split()]
    })

# Use a context manager so the output file is flushed and closed even if
# json.dump raises (the original leaked the handle returned by open()).
with open('results/final_suspects.json', 'w') as out_file:
    json.dump(results_arr, out_file, indent=2)
cso/2_ext_ref_consistency/links_cc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Session 10: ILSVRC # # In this notebook we introduce another dataset that has ten categories. We # will talk about ILSVRC and its role in image analysis. # # ![ilsvrc](https://cdn-images-1.medium.com/max/1600/1*ci-wnR1A-F_RlCC_FJ6rfw.jpeg) # ## Setup # # We need to load the modules within each notebook. Here, we load the # same set as in the previous question. # + # %pylab inline import numpy as np import scipy as sp import pandas as pd import sklearn from sklearn import linear_model import urllib import os from os.path import join # + import matplotlib.pyplot as plt import matplotlib.patches as patches plt.rcParams["figure.figsize"] = (8,8) # - from keras.models import Sequential from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten from keras.preprocessing import image from keras.utils import to_categorical from keras.optimizers import SGD, RMSprop os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # ## Imagenette # # We are now going to look at a larger dataset with 10 different # classes. Our goal is to use neural networks to classify which # of the categories an image comes from. As usual, we have a # spreadsheet showing the metadata for each image. df = pd.read_csv(join("..", "data", "imagenette.csv")) df # Here are the ten categores of images in the dataset. # + import collections class_names = np.array(list(collections.OrderedDict.fromkeys(df.class_name.values))) class_names # - # Let's look at some of the data in the corpus. Can you tell which of the # categories each image is from. 
# + plt.figure(figsize=(14, 14)) idx = np.random.permutation(range(len(df)))[:15] for ind, i in enumerate(idx): plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.subplot(5, 3, ind + 1) img = imread(join('..', 'images', 'imagenette', df.filename[i])) plt.imshow(img) plt.axis("off") # - # ## Loading the dataset # # As before, we also have to read in the dataset using a tandardized image size. # + img_list = [] for i in range(len(df)): img_path = join("..", "images", "imagenette", df.filename[i]) img = image.load_img(img_path, target_size=(32, 32)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) img_list.append(x) X = np.vstack(img_list) / 255 y = np.int32(df.class_num) # - X.shape # And here is a model similiar to the one from the last session. # Notice that the number of parameters is much higher. # + model = Sequential() model.add(Conv2D(32, input_shape=X.shape[1:], kernel_size=(3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(units=10, activation="softmax")) # - model.summary() # The next few steps are also similar to the previous session. We compile # the model, construct training/testing sets, and fit the dataset. model.compile(loss='sparse_categorical_crossentropy', optimizer=SGD(lr=0.03, momentum=0.8, decay=0.0, nesterov=True), metrics=['accuracy']) X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y) help(sklearn.model_selection.train_test_split) X_train.dtype model.fit(X_train, y_train, epochs=25, batch_size=32, validation_data=(X_test, y_test)) # How well does the model produce predictions? # What categories are the hardest to tell apart? # + from sklearn.metrics import confusion_matrix yhat = model.predict_classes(X_test) pd.crosstab(class_names[yhat], class_names[y_test]) # - # What images are not working as expected? 
# + plt.figure(figsize=(14, 14)) yhat = model.predict_classes(X) idx = np.where((yhat != y) & (y == 3))[0][:15] for ind, i in enumerate(idx): plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.subplot(5, 3, ind + 1) img = imread(join('..', 'images', 'imagenette', df.filename[i])) plt.imshow(img) plt.text(0.5, 0.5, class_names[yhat[i]], fontsize=24, color="orange") plt.axis("off") # -
nb/session10-multiclass-nn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:notebook] * # language: python # name: conda-env-notebook-py # --- # <img width="50" src="https://carbonplan-assets.s3.amazonaws.com/monogram/dark-small.png" style="margin-left:0px;margin-top:20px"/> # # # Figure 3: Future projections for each impact # # _Author: <NAME>_ # # The methods below conduct the analyses to calculate the fire risk change factors # included the manuscript <<< insert doi >>>. # # + # %load_ext autoreload # %autoreload 2 from carbonplan_forest_risks import load, setup, plot, fit, utils, prepare, collect import xarray as xr import numpy as np from carbonplan_forest_risks.utils import get_store # - coarsen = 4 mask = ( ( load.nlcd(store="az", year=2001).sel(band=[41, 42, 43, 90]).sum("band") > 0.25 ) .astype("float") .coarsen(x=coarsen, y=coarsen, boundary="trim") .mean() ) # # Load in fire data # historical_fire = xr.open_zarr( get_store( "carbonplan-forests", "risks/results/paper/fire_terraclimate_v6.zarr" ) ).load() fire_mask = ~np.isnan(historical_fire.historical.isel(time=0).drop("time")) ds = ( xr.open_zarr( get_store("carbonplan-forests", "risks/results/paper/fire_cmip.zarr") ) .assign_coords({"x": mask.x, "y": mask.y}) .where(fire_mask) .groupby("time.year") .sum() .where(fire_mask) .mean(dim=["x", "y"]) .probability.mean(dim="gcm") .compute() ) for scenario in ["ssp245", "ssp370", "ssp585"]: ts = ds.sel(scenario=scenario) hist_slice = slice(1990, 2019) fut_slice = slice(2080, 2099) change = ( ts.sel(year=fut_slice).mean(dim="year") / ts.sel(year=hist_slice).mean(dim="year").values ) print("change factor for {} is {}".format(scenario, change.values))
notebooks/paper/Calculations/change-factor-calculations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:cil2_demos] # language: python # name: conda-env-cil2_demos-py # --- # + tags=[] # Import libraries from cil.optimisation.functions import TotalVariation from cil.utilities import dataexample, noise from skimage.metrics import structural_similarity as ssim from skimage.metrics import peak_signal_noise_ratio as psnr import matplotlib.pyplot as plt # + # Load Rainbow data data = dataexample.RAINBOW.get() data.reorder(['horizontal_y', 'horizontal_x','channel']) noisy_data = noise.gaussian(data, seed = 10, var = 0.02) alpha = 0.15 TV = alpha * TotalVariation(max_iteration=500) proxTV = TV.proximal(noisy_data, tau=1.0) # + tags=[] all_im = [data, noisy_data, proxTV] ssim_res = ssim(data.as_array(), proxTV.as_array(), data_range=data.max()-data.min(), multichannel=True) psnr_res = psnr(data.as_array(), proxTV.as_array(), data_range=data.max()-data.min()) title = ['Ground Truth', 'Noisy data', 'TV alpha={}, PSNR/SSIM = {:.3f}, {:.3f}'.format(alpha, psnr_res,ssim_res)] for i in range(len(all_im)): plt.figure(figsize=(15,18)) plt.axis('off') ax = plt.gca() tmp = ax.imshow(all_im[i].as_array()) plt.title(title[i]) # + tags=[]
CaseStudy_ColourProcessing/ColorDenoising.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''.venv'': pipenv)' # name: python3 # --- # + import os import warnings import matplotlib.pyplot as plt import numpy as np import pandas as pd from pyptax import ptax import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.model_selection import ( cross_val_score, RepeatedStratifiedKFold,RandomizedSearchCV, GridSearchCV, train_test_split ) from sklearn.metrics import mean_absolute_error, mean_squared_error import yfinance as yf warnings.filterwarnings("ignore") pd.set_option('display.max_rows', None) pd.set_option('display.max_rows', 500) RANDOM_SEED = 42 # - start_date = "2020-01-01" end_date = "2021-07-31" # + historical_bulletin = ptax.historical(start_date, end_date) df_historical_bulletin = pd.DataFrame.from_dict(historical_bulletin.as_dict["bulletins"], orient='columns') df_historical_bulletin["datetime"] = df_historical_bulletin["datetime"].astype('datetime64[ns]').dt.date df_historical_bulletin.set_index('datetime', inplace=True) ticker = 'iBovespa' df_ticker = yf.download('^BVSP', start=start_date, end=end_date) df_ticker.index = df_ticker.index.astype('datetime64[ns]') df_historical_bulletin.head() df_ticker.head() # - df_merged = df_ticker[["Open", "Close"]].merge(df_historical_bulletin[["bid", "ask"]], left_on='Date', right_on='datetime') df_merged.head() df_historical_bulletin.head() # + fig = plt.figure(figsize=(12, 3)) ax = fig.add_subplot(111) ax.plot(df_historical_bulletin['ask'], label='ask') date_min = df_historical_bulletin.index.min() date_max = df_historical_bulletin.index.max() ax.xaxis.set_major_locator(plt.MaxNLocator(30)) ax.set_xlim(left=date_min, right=date_max) ax.set_title(f"""ptax ask - de {date_min.strftime("%d/%m/%Y")} a {date_max.strftime("%d/%m/%Y")} -""", fontsize = 12) ax.legend(loc='upper left', frameon=False) 
plt.xticks(rotation=90) plt.show(); # - # + fig = plt.figure(figsize=(12, 3)) ax = fig.add_subplot(111) ax.plot(df_ticker['Close'], label=ticker) date_min = df_ticker.index.min() date_max = df_ticker.index.max() ax.xaxis.set_major_locator(plt.MaxNLocator(30)) ax.set_xlim(left=date_min, right=date_max) ax.set_title(f"""รndice {ticker} - de {date_min.strftime("%d/%m/%Y")} a {date_max.strftime("%d/%m/%Y")} -""", fontsize = 12) ax.legend(loc='upper left', frameon=False) plt.xticks(rotation=90) plt.show(); # -
notebooks/ptax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Publishing SQL Data as JSON Documents # ## Publishing JSON # Up to this point, we have focused on JSON functions that allow you to extract values, objects, and arrays from documents. There are many circumstances where you want to be able to take the existing data in a table and make it available to outside world as JSON data. Publishing data as JSON is particularly useful when sending results back to an application that expects JSON. # # Db2 provides two of the ISO SQL JSON publishing functions: `JSON_ARRAY` and `JSON_OBJECT`. The combination of these two functions can provide a way of generating most JSON documents. # ### Load Db2 Extensions and Connect to the Database # The `connection` notebook contains the `CONNECT` statement which allows access to the `SAMPLE` database. If you need to modify the connection information, edit the `connection.ipynb` notebook. # %run db2.ipynb # %run connection.ipynb # ### Publishing Individual Values with JSON_OBJECT # The `JSON_OBJECT` function will generate a JSON object by creating `key:value` pairs. Using the ubiquitous `EMPLOYEE` table in the Db2 `SAMPLE` database, we can generate a JSON object that contains an employeeโ€™s first name through the following `JSON_OBJECT` function. # + magic_args="-j" language="sql" # SELECT JSON_OBJECT(KEY 'lastname' VALUE lastname) # FROM EMPLOYEE # FETCH FIRST ROW ONLY # - # The `JSON_OBJECT` function can have multiple levels of objects within it so you could create an object that contains an individual's entire name. 
# + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'identity' # VALUE JSON_OBJECT( # KEY 'firstname' VALUE FIRSTNME, # KEY 'lastname' VALUE LASTNAME # ) # ) # FROM EMPLOYEE # FETCH FIRST ROW ONLY; # - # You can generate extremely complex documents just using the `JSON_OBJECT` function. This function is complemented by the `JSON_ARRAY` function which allows grouping of values into an array. The syntax of the `JSON_OBJECT` function is shown below. # ***JSON_OBJECT Syntax*** # ![JSON_VALUE](media/JSON_OBJECT.png) # # ***Key Expression*** # ![JSON_VALUE](media/jo-key-expression.png) # # ***Null Clause*** # ![JSON_VALUE](media/jo-null-clause.png) # # ***Unique Clause*** # ![JSON_VALUE](media/jo-unique-clause.png) # # ***Returning Clause*** # ![JSON_VALUE](media/jo-returning-clause.png) # What differentiates the `JSON_OBJECT` from other functions is that the `null-clause`, `unique-clause`, and `returning-clause` are for the entire block of `key-value` definitions not each individual one. # ### Key Value Clause # The first clause is used to create the key-value pairs that you want published. You can create one or more key-value pairs, including the ability to nest objects. # # ![JSON_OBJECT](media/jo-key-expression.png) # # The `key-value` field represents the first field in a JSON object: # ```json # "first_name": "Hronis" # ``` # # The second field, *json-expression*, is the value associated with the key. The *json-expression* can be a reference to a column from a table, a constant, or a variable as long as it is not one of the following data types: # * GRAPHIC, VARGRAPHIC # * DBCLOB # * BINARY # * CHAR FOR BIT DATA, VARCHAR FOR BIT DATA # * XML # # The simple example below will create a JSON document using some character constants. # %sql -j VALUES JSON_OBJECT( KEY 'name' VALUE 'Bird'); # You can use any of the valid Db2 data types in the `VALUE` clause as well as create multiple key-value pairs. 
# %sql -j VALUES JSON_OBJECT( KEY 'name' VALUE 'Bird', KEY 'salary' VALUE 95000); # ### Nested Key-value Expressions # The previous section illustrated the use of `JSON_OBJECT` to create a single-level document (no nesting). If you wanted to create a nested structure, then all you have to do is start another `JSON_OBJECT` function. The `VALUE` of the key is simply another `JSON_OBJECT` function call. # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( KEY 'foreword' VALUE # JSON_OBJECT( KEY 'primary' VALUE # JSON_OBJECT( KEY 'first_name' VALUE 'Thomas', # KEY 'last_name' VALUE 'Hronis' # ) # FORMAT JSON # ) # FORMAT JSON # ); # - # There are no limits to the depth of nesting that you can code but it becomes difficult to keep track of levels as you build more complex `JSON_OBJECT` expressions! # ### FORMAT JSON versus FORMAT BSON # Before discussing the use of the `FORMAT` clause, we need to understand the default output produced by the `JSON_OBJECT` function. The default output from the function is a character string that is surrounded by curly braces {} to form a proper JSON document. # %sql -j VALUES JSON_OBJECT(KEY 'name' VALUE 'Bird'); # When nesting a call to `JSON_OBJECT` within another one, by default, the upper `JSON_OBJECT` function places double quotes around the results from the inner function since it is returning a character string and these must be double-quoted by JSON format rules. For instance, the previous nested object example without any `FORMAT` clause produces the following output: # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'author' VALUE # JSON_OBJECT( # KEY 'first_name' VALUE 'Thomas', # KEY 'last_name' VALUE 'Hronis' # ) # ); # - # This rather strange output is caused by two things that are occurring. 
First, the output from the second `JSON_OBJECT` function would be: # + magic_args="-j " language="sql" # VALUES JSON_OBJECT( # KEY 'first_name' VALUE 'Thomas', # KEY 'last_name' VALUE 'Hronis' # ); # - # Since `FORMAT JSON` was not specified for the value in this `JSON_OBJECT` call, this output value is recognized only as a character string, not a JSON object, when it is passed as the proposed value for the key "author" in the first `JSON_OBJECT` function. As such, JSON format rules demand that the character value be enclosed in double quotes. And since this string itself contains double quote characters, the internal quote characters need to be escaped with the backslash character. # ```json # "{\"first_name\":\"Thomas\",\"last_name\":\"Hronis\"}" # ``` # # Now this string is a proper JSON character value and can be used as the value in the first key-value pair to produce the final result: # ```json # {"author":"{\"first_name\":\"Thomas\",\"last_name\":\"Hronis\"}"} # ``` # The `FORMAT` clause can be used to eliminate the external quotes and escape characters by indicating that the value to be returned by the nested `JSON_OBJECT` is actually already in valid JSON format and does not need to be treated as a character string. # # The following version of the example indicates that the value returned by the nested `JSON_OBJECT` call is in JSON format and would produce the desired output: # + magic_args="-j " language="sql" # VALUES JSON_OBJECT( # KEY 'author' VALUE # JSON_OBJECT( # KEY 'first_name' VALUE 'Thomas', # KEY 'last_name' VALUE 'Hronis' # ) # FORMAT JSON # ); # - # This `FORMAT` clause can be used when creating multi-level objects but not for simple values. If you use the clause with a single key-value pair, you will get an error. # %sql -j VALUES JSON_OBJECT( KEY 'name' VALUE Bird FORMAT JSON) # This is because Bird by itself without the quotes is not in valid JSON format. As a JSON string data type, it should be enclosed in double quotes (e.g. "Bird"). 
Recall our discussion on valid JSON format at the beginning of our journey! # # The `FORMAT BSON` option will publish the key-value pair in binary format while `FORMAT JSON` will publish it character format. # # The following example illustrates the difference between using FORMAT JSON and no formatting directive. # + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'identity' VALUE # JSON_OBJECT( # KEY 'firstname' VALUE FIRSTNME, # KEY 'lastname' VALUE LASTNAME # ) # FORMAT JSON # ) # FROM EMPLOYEE FETCH FIRST ROW ONLY; # - # By using the `FORMAT JSON` clause, we are able to eliminate the quoted strings from the output. If you are planning to publish the data back to an application that needs to process JSON, then you should always use `FORMAT JSON` (or `BSON`) to create the proper formatting for nested objects. # ### NULL Handling # The `NULL` option on the `JSON_OBJECT` function is used to handle values that are null when retrieved from a table. # # ![JSON_OBJECT](media/jo-null-clause.png) # # The default setting is `NULL ON NULL` which will publish the key-value pair even if the value is null. # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'name' VALUE null, # KEY 'salary' VALUE 95000 # NULL ON NULL # ); # - # In this case, when a null value was encountered in the result, the function put the JSON special word null in the output. Setting `ABSENT ON NULL` will prevent the key-value pair from being included in the output. # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'name' VALUE null, # KEY 'salary' VALUE 95000 # ABSENT ON NULL # ); # - # ### KEYS # A best practice in generating key-value pairs is not to duplicate a key name at the same level. If there are duplicate keys within a document, there is no guarantee of which one will be chosen when you attempt to retrieve it. # # ![JSON_VALUE](media/jo-unique-clause.png) # # The following `JSON_OBJECT` example creates two key-value pairs with the same key. 
# + magic_args="-json" language="sql" # VALUES JSON_OBJECT( # KEY 'name' VALUE 'Thomas', # KEY 'name' VALUE 'Hronis' # ); # - # The default behavior is to ignore duplicate keys (`WITHOUT UNIQUE KEYS`) and so the above example will not generate an error. If you are positive that there will not be duplicate keys in your JSON, then you should leave this as the default since it will result in less overhead in the function. # # When `WITH UNIQUE KEYS` is specified as part of the syntax, the function will raise an error code of -16407 for the above example. # + magic_args="-json" language="sql" # VALUES JSON_OBJECT( # KEY 'name' VALUE 'Thomas', # KEY 'name' VALUE 'Hronis' # WITH UNIQUE KEYS # ); # - # Note that duplicate keys can exist at different levels in an object and within arrays, as long as they have a unique JSON path expression. # ```json # { # "authors": [ # {"first_name": "Paul", "last_name" : "Bird"}, # {"first_name": "George","last_name" : "Baklarz"} # ], # "foreword": { # "primary": # { # "first_name": "Thomas", # "last_name" : "Hronis" # } # } # } # ``` # ### RETURNING Clause # The `RETURNING` clause is used to define how the final JSON document is to be returned to the application. # #### Returning Clause # ![JSON_OBJECT](media/jo-returning-clause.png) # # By default, the document is returned as a `CLOB` object, but you can use `CHAR`, `VARCHAR`, `CLOB`, `VARBINARY`, or `BLOB`. If you are returning the data as a character string, you must specify `FORMAT JSON` or use `FORMAT BSON` for a binary string. # # If you supply too small of a data type, then the function will fail with an error message: # ```sql # SQL0137N The length resulting from "SYSIBM.JSON_OBJECT" is greater than "20". 
SQLSTATE=54006 SQLCODE=-137 # ``` # # An error will also be produced if you try to return a `BSON` value into a character string: # ```sql # SQL0171N The statement was not processed because the data type, length or value of the argument for the parameter in position "5" of routine "SYSIBM.JSON_OBJECT" is incorrect. Parameter name: "". SQLSTATE=42815 SQLCODE=-171 # ``` # ### Publishing Array Values with JSON_ARRAY # `JSON_OBJECT` is able to create complex JSON documents from data within a table, but it is not able to generate arrays. In order to create arrays, we must use the `JSON_ARRAY` function. # # ***JSON_ARRAY Syntax*** # ![JSON_OBJECT](media/JSON_ARRAY.png) # # ***JSON Expression*** # ![JSON_OBJECT](media/ja-json-expression.png) # # ***Full Select*** # ![JSON_OBJECT](media/ja-full-select.png) # # ***Null Clause*** # ![JSON_OBJECT](media/ja-null-clause.png) # # ***Returning Clause*** # ![JSON_OBJECT](media/ja-returning-clause.png) # There are two forms of the `JSON_ARRAY` function. The first version is similar to the `JSON_OBJECT` function where you supply a list of values to create an object. # # There is no key associated with a JSON array, so you only need to supply the list of values that you want in there. # %sql -j VALUES JSON_ARRAY( 1523, 902, 'Thomas', 7777); # JSON array elements do not need to have the same data type โ€“ they can even contain other objects. Here is an example of a `JSON_OBJECT` being inserted into an array. # + magic_args="-j" language="sql" # VALUES JSON_ARRAY(1523, 902, # JSON_OBJECT( KEY 'lastname' VALUE 'Bird') FORMAT JSON, # 7777); # - # While the `JSON_ARRAY` function can be used by itself, it produces a JSON array value not a valid JSON object. The output from this function is meant to be used as part of a `JSON_OBJECT` structure. # # The second form of the `JSON_ARRAY` function uses the results of a SQL select statement to build the array values. 
# # Only one `SELECT` statement can be used in the body of the function โ€“ you cannot have multiple `SELECT` commands in a list! If you do need to create an array from multiple sources, you should look at using a `SELECT` statement with `UNION` to create one list of items. # # The following example publishes all of the department numbers for the departments that start with the letter B. # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'departments' VALUE # JSON_ARRAY(SELECT DEPTNO FROM DEPARTMENT # WHERE DEPTNAME LIKE 'B%') # FORMAT JSON # ); # - # The `SELECT` statement can only return one column, otherwise an error message will be raised. # ```sql # SQL0412N Multiple columns are returned from a subquery that is allowed only one column. SQLSTATE=42823 # SQLCODE=-412 # ``` # # If you do want to create an array of objects, you could use nested table expressions (or inline SQL) to generate the objects that you want. For instance, consider the `DEPARTMENT` table that we were using in the previous example. Perhaps you want to create an individual document to list all of the departments in the company, the document would look similar to the following. # ```json # { # "departments" : [ # { # "deptno" : "A01", # "deptname" : "Purchasing" # }, # { # "deptno" : "B01", # "deptname" : "Accounts" # }, # โ€ฆ # ] # } # ``` # The `JSON_ARRAY` function can only work with one value, but what if we generate the object as part of another SQL statement? The `WITH` clause allows us to create the JSON document outside of the SQL that is publishing the data. # + magic_args="-j " language="sql" # WITH DEPARTMENTS(DEPT) AS # ( # SELECT JSON_OBJECT( # KEY 'deptno' VALUE D.DEPTNO, # KEY 'deptname' VALUE D.DEPTNAME # ) # FROM DEPARTMENT D # ORDER BY D.DEPTNO # ) # SELECT * FROM DEPARTMENTS # - # Now we can select from the nested table expression as part of the `JSON_ARRAY` function to create an array of objects. 
# + magic_args="-j" language="sql" # WITH DEPARTMENTS(DEPT) AS # ( # SELECT JSON_OBJECT( # KEY 'deptno' VALUE D.DEPTNO, # KEY 'deptname' VALUE D.DEPTNAME # ) # FROM DEPARTMENT D # ORDER BY D.DEPTNO # ) # SELECT JSON_OBJECT( # KEY 'departments' VALUE # JSON_ARRAY( # SELECT DEPT FROM DEPARTMENTS # FORMAT JSON # ) # FORMAT JSON # ) # FROM SYSIBM.SYSDUMMY1; # - # ### FORMAT JSON versus FORMAT BSON # The `JSON_OBJECT` function had an extensive explanation of `FORMAT JSON` versus `FORMAT BSON`. If you are imbedding the `JSON_ARRAY` inside of a `JSON_OBJECT` function, then you should use `FORMAT JSON` to ensure the proper formatting of the document. The previous example which showed how to publish department numbers would have returned the following if `FORMAT JSON` was not used. # + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'departments' VALUE # JSON_ARRAY(SELECT DEPTNO FROM DEPARTMENT # WHERE DEPTNAME LIKE 'B%') # ); # - # ### NULL Handling # The `NULL` option on the `JSON_ARRAY` function is used to handle arrays values that are null when retrieved from a table. # # ![JSON_OBJECT](media/ja-null-clause.png) # # The default setting is `ABSENT ON NULL` which will ignore null values. Note that this is different from `JSON_OBJECT` which uses `NULL ON NULL` as the default. # %sql -j VALUES JSON_ARRAY(1523, null); # If you really do want the `null` value, then you should include the `NULL ON NULL` option. # %sql -j VALUES JSON_ARRAY(1523, null NULL ON NULL); # There is a potential that your SQL will not work if no values are returned by the `SELECT` statement. The `JSON_ARRAY` function expects at least one value to be returned (even a null value) in order to generate the array so you have to create a query to cover this possibility. The following SQL illustrates one technique that can be used. 
# + magic_args="-j" language="sql" # VALUES JSON_OBJECT( # KEY 'departments' VALUE # JSON_ARRAY( # VALUES NULL # UNION ALL # SELECT DEPTNO FROM DEPARTMENT # WHERE DEPTNAME LIKE 'Z%' # ) # FORMAT JSON # ); # - # The `VALUES NULL UNION ALL` will generate at least one value that is `null` in the list and then the `JSON_ARRAY` function can ignore it (remember that `ABSENT ON NULL` is the default behavior!) and generate an empty array. # ### RETURNING Clause # The `RETURNING` clause is used to define how the final array is to be returned to the application. # ![JSON_OBJECT](media/ja-returning-clause.png) # # By default, the document is returned as a `CLOB` object, but you can also use `CHAR`, `VARCHAR`, `CLOB`, `VARBINARY`, or `BLOB`. If you want the data to be in proper JSON format, then use `FORMAT JSON`. `FORMAT BSON` is not available as part of `JSON_ARRAY` since the expectation is that you will be using it within a `JSON_OBJECT` function and that handles the `FORMAT BSON` conversion. # # If you supply too small of a data type, then the function will fail with an error message: # ```sql # SQL0137N The length resulting from "SYSIBM.JSON_OBJECT" is greater than "20". SQLSTATE=54006 SQLCODE=-137 # ``` # ### Publishing Example # The first step is to decide what type of document you want to create. The `SAMPLE` database has the `EMPLOYEE` and `DEPARTMENT` table and we want to be able to publish a document that follows this format. # ```json # { # "empno" : "0001", # "personal" : { # "first_name":"name","middle_initial":"x", # "last_name":"name","sex":"m","birthdate":"1999-01-01" # }, # "compensation" : {"salary":50000,"bonus":4500,"commission":500}, # "position" : {"job":"worker","deptno":"A01","department":"cleaning"}, # "manages" : ["A01"] # } # ``` # # The last field (manages) is a list of the departments that the individual manages. It should be empty if the employee is not a manager. # # The following fields are available to help us build the JSON document. 
# * Employee number (EMPNO) # * Personal information (FIRSTNME, MIDINIT, LASTNAME, SEX, BIRTHDATE) # * Compensation (SALARY, BONUS, COMMISSION) # * Job details (JOB, WORKDEPT, DEPARTMENT NAME) # * Departments managed # # All of the employee information comes from the `EMPLOYEE` table, while the department name is found in the DEPARTMENT table. The tricky portion is determining what departments report to a manager. # # There is a column in the `DEPARTMENT` table that gives us the manager number (`MGRNO`), their base department number (`DEPTNO`) and what the administrative department (or higher-level department) that manages them. # # The following `SELECT` statement will give us the manager numbers and the departments that report to them (including their own). # + language="sql" # SELECT D1.MGRNO, D1.DEPTNO FROM DEPARTMENT D1 # WHERE D1.MGRNO IS NOT NULL # UNION # SELECT D1.MGRNO, D2.DEPTNO FROM DEPARTMENT D1, DEPARTMENT D2 # WHERE D2.ADMRDEPT = D1.DEPTNO AND D1.MGRNO IS NOT NULL # ORDER BY MGRNO; # - # The JSON document we want to create can be broken down into five parts: # * Employee number # * Personal information # * Compensation # * Job Details # * Departments managed # # The empno field is straightforward since it is a single value derived from the `EMPNO` field in the `EMPLOYEE`. # + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'empno' VALUE E.EMPNO # ) # FROM EMPLOYEE E FETCH FIRST ROW ONLY; # # { # "empno":"000010" # } # - # The next three fields are JSON objects that we will need to create from a combination of the data in the `EMPLOYEE` and `DEPARTMENT` tables. The personal field is an object with five values. To create this object, we use the following JSON_OBJECT function. 
# + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'first_name' VALUE E.FIRSTNME, # KEY 'middle_initial' VALUE E.MIDINIT, # KEY 'last_name' VALUE E.LASTNAME, # KEY 'sex' VALUE E.SEX, # KEY 'birthdate' VALUE E.BIRTHDATE # NULL ON NULL # ) # FROM EMPLOYEE E FETCH FIRST ROW ONLY; # - # This object needs to be nested into the SQL statement we are building for our desired JSON document. The next example shows the two SQL statements merged together. # + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'empno' VALUE E.EMPNO, # KEY 'personal' VALUE # JSON_OBJECT( # KEY 'first_name' VALUE E.FIRSTNME, # KEY 'middle_initial' VALUE E.MIDINIT, # KEY 'last_name' VALUE E.LASTNAME, # KEY 'sex' VALUE E.SEX, # KEY 'birthdate' VALUE E.BIRTHDATE # ) # FORMAT JSON # ) # FROM EMPLOYEE E FETCH FIRST ROW ONLY; # - # We have to remember to add the `FORMAT JSON` clause to remove the escape characters for any double quotes in the result from the upper `JSON_OBJECT` function. # # The same process has to be used to create the `JSON_OBJECT` functions for the two other sections (compensation and position). To generate the department name, we must add a join in the SQL between the `EMPLOYEE` table and the `DEPARTMENT` table in the `WHERE` clause. 
# # The SQL to generate everything except the manager list is found below: # + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'empno' VALUE E.EMPNO, # KEY 'personal' VALUE # JSON_OBJECT( # KEY 'first_name' VALUE E.FIRSTNME, # KEY 'middle_initial' VALUE E.MIDINIT, # KEY 'last_name' VALUE E.LASTNAME, # KEY 'sex' VALUE E.SEX, # KEY 'birthdate' VALUE E.BIRTHDATE # ) # FORMAT JSON, # KEY 'compensation' VALUE # JSON_OBJECT( # KEY 'salary' VALUE E.SALARY, # KEY 'bonus' VALUE E.BONUS, # KEY 'commission' VALUE E.COMM # ) # FORMAT JSON, # KEY 'position' VALUE # JSON_OBJECT( # KEY 'job' VALUE E.JOB, # KEY 'deptno' VALUE E.WORKDEPT, # KEY 'department' VALUE D.DEPTNAME # ) # FORMAT JSON # ) # FROM EMPLOYEE E, DEPARTMENT D # WHERE D.DEPTNO = E.WORKDEPT FETCH FIRST ROW ONLY; # - # Finally, we need to add the list of departments that a manager is responsible for. The code for determining this was shown earlier. In order to get this into the JSON document, we must use the `JSON_ARRAY` function with the imbedded SQL statement. This SQL snippet shows the results from manager `E.EMPNO = '000010'`. # + magic_args="-j" language="sql" # SELECT JSON_ARRAY( # VALUES NULL # UNION # SELECT D1.DEPTNO FROM DEPARTMENT D1 WHERE D1.MGRNO = E.EMPNO # UNION # SELECT D2.DEPTNO FROM DEPARTMENT D1, DEPARTMENT D2 # WHERE D2.ADMRDEPT = D1.DEPTNO AND D1.MGRNO = E.EMPNO # ) FROM EMPLOYEE E # WHERE E.EMPNO = '000010' # - # We need to add the `VALUES NULL` clause to make sure we have a null array created in the event the employee is not a manager. The final SQL is found below. 
# + magic_args="-j" language="sql" # SELECT JSON_OBJECT( # KEY 'empno' VALUE E.EMPNO, # KEY 'personal' VALUE # JSON_OBJECT( # KEY 'first_name' VALUE E.FIRSTNME, # KEY 'middle_initial' VALUE E.MIDINIT, # KEY 'last_name' VALUE E.LASTNAME, # KEY 'sex' VALUE E.SEX, # KEY 'birthdate' VALUE E.BIRTHDATE # ) # FORMAT JSON, # KEY 'compensation' VALUE # JSON_OBJECT( # KEY 'salary' VALUE E.SALARY, # KEY 'bonus' VALUE E.BONUS, # KEY 'commission' VALUE E.COMM # ) # FORMAT JSON, # KEY 'position' VALUE # JSON_OBJECT( # KEY 'job' VALUE E.JOB, # KEY 'deptno' VALUE E.WORKDEPT, # KEY 'department' VALUE D.DEPTNAME # ) # FORMAT JSON, # KEY 'manages' VALUE # JSON_ARRAY( # VALUES NULL # UNION # SELECT D1.DEPTNO FROM DEPARTMENT D1 # WHERE D1.MGRNO = E.EMPNO # UNION # SELECT D2.DEPTNO FROM DEPARTMENT D1, # DEPARTMENT D2 # WHERE D2.ADMRDEPT = D1.DEPTNO AND # D1.MGRNO = E.EMPNO # ) # FORMAT JSON # ) # FROM EMPLOYEE E, DEPARTMENT D # WHERE D.DEPTNO = E.WORKDEPT; # - # ## Summary # The `JSON_OBJECT` and `JSON_ARRAY` functions can be used to create JSON documents from data in relational tables. The `JSON_OBJECT` function can be used to create complex objects, by nesting other `JSON_OBJECT` and `JSON_ARRAY` calls. # # To create a JSON document, you need to decide what the structure of your document will look like, the fields that you want to publish, and also decide what other tables you may need to create arrays. # # The best way to publish relational data as JSON is to break up the document into multiple sections to create and test them individually before combining them all together to get the desired end result. # #### Copyright (C) IBM 2021, <NAME> [<EMAIL>]
Db2_11.5_JSON_09_Publishing_JSON_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import sys
import random
import numpy as np
import cv2
from PIL import Image as im
from IPython.display import clear_output

# Open the test video (original author's iCloud path; the non-ASCII folder
# name is kept byte-for-byte as in the original).
cap = cv2.VideoCapture('/Users/jungjihoon/Library/Mobile Documents/com~apple~CloudDocs/00_DataScience/แ„†แ…ตแ†ซแ„’แ…งแ†ผแ„€แ…ต/vtest.avi')

# +
# Pedestrian detection: HOG descriptor with OpenCV's bundled linear-SVM
# people detector.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

while True:
    ret, frame = cap.read()
    if not ret:
        # End of stream (or the file failed to open) — stop the loop.
        break

    # Detect pedestrians in the current frame.
    detected, _ = hog.detectMultiScale(frame)

    # Draw each detection as a rectangle in a random colour.
    for (x, y, w, h) in detected:
        c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        cv2.rectangle(frame, (x, y, w, h), c, 3)

    cv2.imshow('frame', frame)
    if cv2.waitKey(10) == 27:  # ESC quits the preview loop
        break

# BUG FIX: release the capture handle; the original leaked it and only
# destroyed the display windows.
cap.release()
cv2.destroyAllWindows()
opencv_Histogram_of_Oriented_Gradients(HOG)_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="FWkuJabJSKGB"
import sys

# Confirm that we're using Python 3.
# BUG FIX: the original used `is 3`, an identity comparison that only worked
# thanks to CPython's small-integer caching; `==` is the correct value
# comparison.
assert sys.version_info.major == 3, 'Oops, not running Python 3. Use Runtime > Change runtime type'

# + colab={"base_uri": "https://localhost:8080/"} id="dzLKpmZICaWN" outputId="cf8aca9b-c80d-4547-80c3-20b0db56fbdd"
# TensorFlow and tf.keras
print("Installing dependencies for Colab environment")
# !pip install -Uq grpcio==1.32.0

import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import os
import subprocess

print('TensorFlow version: {}'.format(tf.__version__))

# + colab={"base_uri": "https://localhost:8080/"} id="7MqDQO0KCaWS" outputId="b86dc51a-3a68-47c9-d408-6f2ce75d0326"
# Load the Fashion-MNIST dataset (60k train / 10k test, 28x28 greyscale).
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# scale the values to 0.0 to 1.0
train_images = train_images / 255.0
test_images = test_images / 255.0

# reshape for feeding into the model (add a trailing channel dimension)
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

print('\ntrain_images.shape: {}, of {}'.format(train_images.shape, train_images.dtype))
print('test_images.shape: {}, of {}'.format(test_images.shape, test_images.dtype))

# + colab={"base_uri": "https://localhost:8080/"} id="RlboWCldtc5_" outputId="ae5200e2-03c4-404d-9aac-bfd3323704f8"
# Small CNN: one strided conv layer, flatten, softmax over the 10 classes.
model = keras.Sequential([
    keras.layers.Conv2D(input_shape=(28,28,1), filters=8, kernel_size=3,
                        strides=2, activation='relu', name='Conv1'),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation=tf.nn.softmax, name='Softmax')
])
model.summary()

testing = False
epochs = 15

# Train and evaluate on Fashion-MNIST.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=epochs)

test_loss, test_acc = model.evaluate(test_images, test_labels)
print('\nTest accuracy: {}'.format(test_acc))

# + colab={"base_uri": "https://localhost:8080/"} id="0w5Rq8SsgWE6" outputId="9a361ff0-7681-41ed-e908-5fdecc2137d6"
# Fetch the Keras session and save the model
# The signature definition is defined by the input and output tensors,
# and stored with the default serving key
import tempfile
MODEL_DIR = tempfile.gettempdir()
version = 1
export_path = os.path.join(MODEL_DIR, str(version))
print('export_path = {}\n'.format(export_path))

# BUG FIX: the original saved to the hard-coded "API/Fashion/1" while
# export_path was computed, printed, and `ls`-listed below — so the listing
# showed nothing. Save to export_path so the printed path and the listing
# match the actual artifact location.
tf.keras.models.save_model(
    model,
    export_path,
    overwrite=True,
    include_optimizer=True,
    save_format=None,
    signatures=None,
    options=None
)

print('\nSaved model:')
# !ls -l {export_path}

# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="TYlTrmRMBiKY" outputId="4435b7e9-54b4-4fbc-ad1e-a39fe9b02398"
# Render the model architecture diagram.
tf.keras.utils.plot_model(
    model,
    to_file='model.png',
    show_shapes=True,
    show_dtype=True,
    show_layer_names=True,
    rankdir='TB',
    expand_nested=False,
    dpi=96
)

# + colab={"base_uri": "https://localhost:8080/"} id="LU4GDF_aYtfQ" outputId="bccbeac0-f3a1-4394-d1ca-125d0aa9a737"
# Inspect the saved model's serving signatures.
# !saved_model_cli show --dir {export_path} --all

# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="Luqm_Jyff9iR" outputId="1ab19984-039e-47a7-81f5-e3326aee3d93"
def show(idx, title):
    """Display test image `idx` with `title` as the caption."""
    plt.figure()
    plt.imshow(test_images[idx].reshape(28,28))
    plt.axis('off')
    plt.title('\n\n{}'.format(title), fontdict={'size': 16})

import random
rando = random.randint(0,len(test_images)-1)
show(rando, 'An Example Image: {}'.format(class_names[test_labels[rando]]))

# + id="2dsD7KQG1m-R"
# Dump a TensorFlow-Serving REST request body for the first three test images.
import json
with open('request.json', 'w') as f:
    json.dump({"signature_name": "serving_default", "instances": test_images[0:3].tolist()}, f)

# + id="vZzjNS_8IcVQ"
# (completes the install magic that was truncated at the original line break)
# !pip install sklearn
from skimage import transform, io
from sklearn.preprocessing import MinMaxScaler

# Download a t-shirt product photo and convert it to greyscale.
img_array = io.imread("https://shop.tate.org.uk/dw/image/v2/BBPB_PRD/on/demandware.static/-/Sites-TateMasterShop/default/dwaa107262/tate-logo-black--tshirt-back-g1086.jpg", as_gray=True)

# Resize to the 28x28 input the model expects.
small_grey = transform.resize(
    img_array, (28, 28), mode='symmetric', preserve_range=True)

# Invert then rescale towards [0, 1] to roughly match Fashion-MNIST's
# dark-figure-on-light-background convention.
# NOTE(review): negating before dividing by 255 leaves values in [-1, 0];
# presumably the MinMax scaling below was meant to compensate — confirm the
# intended preprocessing against the training distribution.
small_grey = (small_grey * -1)
small_grey = small_grey / 255.0
plt.imshow(small_grey)

# BUG FIX: the original created an *uninitialized* np.ndarray((1,28,28,1))
# and then discarded the results of scaler.fit_transform(...), both
# .reshape(...) calls, and np.append(...) — none of which operate in place —
# so model.predict ran on whatever garbage bytes the empty buffer held.
# Apply the intended MinMax scaling and build the 1-image batch directly.
scaler = MinMaxScaler()
attempt2 = scaler.fit_transform(small_grey)
data_array = attempt2.reshape(1, 28, 28, 1)

# + id="jBe6DfsObYwz"
model.predict(data_array)

# +
# Report the top class and its softmax confidence.
prediction_array = model.predict(data_array)
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
result = {
    "prediction": class_names[np.argmax(prediction_array)],
    "confidence": '{:2.0f}%'.format(100*np.max(prediction_array))
}
print(result)
# -
REST_simple.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
from datetime import datetime

# # Arrays

# ## Lesson Objectives

# By the end of this lesson, you will be able to:
# - Construct an array using the numpy module
# - Index, slice, and iterate through an array
# - Reshape arrays
# - Perform array math, including broadcasting

# ## Table of Contents
# - [Numpy](#numpy)
# - [Arrays](#arrays)
# - [Indexing, Slicing and Iterating](#indexing)
# - [Shape Manipulation](#shape)
# - [Array Math](#math)
# - [Broadcasting](#broadcast)
# - [Applications](#applications)

# <a id='numpy'></a>
# ## NumPy

# NumPy is the fundamental package for scientific computing with Python. It contains, among other things:
#
# - a powerful N-dimensional array object
# - sophisticated (broadcasting) functions
# - tools for integrating C/C++ and Fortran code
# - useful linear algebra, Fourier transform, and random number capabilities
#
# NumPy also allows you to define arbitrary data types. This allows for quick integration with a wide variety of databases.

# <a id='arrays'></a>
# ## Arrays

# A NumPy array is an *N*-dimensional list of values, all of the same type, that is indexed by a `tuple` of positive integers.
#
# When talking about an array, there's some terminology you need to be familiar with:
# - The number of dimensions, called **axes** in NumPy lingo, is the **rank** of the array.
# - The **shape** of an array is the tuple of integers giving the **length** of the array along each axis.
#
# For example, take the coordinates of a point in 3D space: `[1, 2, 1]`
#
# - This is an array of **rank** 1, because it has one axis.
# - That axis has a **length** of 3, because there are three elements in it.
# - Thus the **shape** of the array is (3,)

# ### Array Creation

# There are several ways to create an array.
#
# The most intuitive is to create one from a `list` or `tuple` using the NumPy `array()` function. When you do this, the type of the resulting array is deduced from the type of the elements in the container you passed.

# +
l = [[ 0,  1,  2,  3,  4],
     [ 5,  6,  7,  8,  9],
     [10, 11, 12, 13, 14]]

a = np.array(l)
a
# -

a.dtype #see what type numpy decided to go with

# Note that you cannot create an array with multiple numeric arguments. They need to be in a container of some sort!

a = np.array(1,2,3,4)    # WRONG - raises TypeError: array() takes a single sequence argument
a = np.array([1,2,3,4])  # RIGHT

# ### NumPy Array-Creating Functions
#
# > **`arange`**
#
# > returns evenly spaced values within a given interval.

a = np.arange(15).reshape(3, 5)
a

# In some cases, you want to create an array although you don't yet know what's going to go into it. So long as you know the size, you can use one of several NumPy functions to create arrays with placeholder content. This minimizes the necessity of growing arrays, which is an expensive operation.
#
# > **`zeros`**
#
# > creates an array full of zeros.

np.zeros((3,5))

# > **`ones`**
#
# > creates an array full of ones.

np.ones( (2,3,4), dtype=np.int16 ) #you can specify the dtype when creating the array

# > **`empty`**
#
# > creates an array whose initial content is random and depends on the state of the memory. By default, the `dtype` of the created array is `float64`.

np.empty( (2,3) )  # output may vary

# > **`arange`**
#
# > To create sequences of numbers, NumPy provides a function analogous to `range`.

np.arange( 0, 100, 10 )  # it accepts int

np.arange( 0, 10, 0.5 )  # or float arguments

# > **`linspace`**
#
# > When `arange` is used with floats, [finite floating point precision](https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html) means that it's generally not possible to predict the number of elements that will be returned. Instead, you should use `linspace`, which receives as an argument the number of elements we want instead of the step:

np.linspace( 0, 2, 10 )  # 10 numbers from 0 to 2, endpoints included

# ### Array Instance Methods

# Like other objects, there are some useful instance methods you can use to inspect your arrays.

# **`ndarray.ndim`**

# > the number of axes (dimensions) of the array. Remember that the number of dimensions is referred to as the array's rank.

a.ndim

# **`ndarray.shape`**

# > the dimensions of the array. This is a tuple of integers indicating the size of the array in each dimension. For a matrix with `n` rows and `m` columns, shape will be `(n,m)`. The length of the shape tuple is therefore the rank, or number of dimensions, `ndim`.

a.shape

# **`ndarray.size`**

# > the total number of elements of the array. This is equal to the product of the elements of `shape`.

a.size

# **`ndarray.dtype`**

# > an object describing the type of the elements in the array. One can create or specify dtypes using standard Python types. Additionally NumPy provides types of its own. numpy.int32, numpy.int16, and numpy.float64 are some examples.

a.dtype

# **`ndarray.itemsize`**

# > the size in bytes of each element of the array. For example, an array of elements of type float64 has itemsize 8 (=64/8), while one of type complex128 has itemsize 16 (=128/8). It is equivalent to ndarray.dtype.itemsize.

a.itemsize

# **`ndarray.data`**

# > the buffer containing the actual elements of the array. Normally, we won't need to use this attribute because we will access the elements in an array using indexing facilities.

a.data

# ### Exercises

# <a id='indexing'></a>
# ## Indexing, Slicing and Iterating

# One-dimensional arrays can be indexed, sliced, and iterated over, much like lists and other Python sequences.
a = np.arange(0,100,10) a a[2] a[2:5] a[:6:2] = 999 # from start to position 6, exclusive, set every 2nd element to 999 a a[ : :-1] # reverse a for i in a: print(i**(1/2.)) # Multidimensional arrays can have one index per axis. These indices are given in a tuple separated by commas: b = np.arange(0,9).reshape(3,3) #incidentally, you can construct an array by executing a function over each coordinate b # Here's an illustration for indexing a 2D array. # # <img src = 'assets/index_2d_array.png' height = 450 width = 450> b[0,2] b[0:3, 1] # each row in the second column of b b[ : ,1] # equivalent to the previous example b[1:3, : ] # each column in the second and third row of b # When fewer indices are provided than the number of axes, the missing indices are considered complete slices: b[-1] # the last row. Equivalent to b[-1,:] # The expression within brackets in `b[i]` is treated as an `i` followed by as many instances of `:` as needed to represent the remaining axes. NumPy also allows you to write this using dots as `b[i,...]`. # # The dots (`...`) represent as many colons as needed to produce a complete indexing tuple. For example, if `x` is a rank 5 array (i.e., it has 5 axes), then: # - `x[1,2,...]` is equivalent to `x[1,2,:,:,:]` # - `x[...,3]` is equivalent to `x[:,:,:,:,3]` # - `x[4,...,5,:]` is equivalent to `x[4,:,:,5,:]` # + c = np.array( [[[ 0, 1, 2], # a 3D array (two stacked 2D arrays) [ 10, 12, 13]], [[100,101,102], [110,112,113]]]) c # - c.shape c[1,...] # the same as c[1,:,:] or c[1] c[...,2] # the same as c[:,:,2] # Iterating over multidimensional arrays is done with respect to the first axis: for row in c: print(row) # However, if one wants to perform an operation on each element in the array, one can use the `flat` attribute, which is an iterator over all the elements of the array: for e in c.flat: print(e) # ### Fancy Indexing: Indexing with Arrays of Indices # It's NumPy and not me that calls this "fancy" indexing. Honest... 
# # So, in addition to indexing by integers and slices, NumPy arrays can also be indexed by arrays of integers. a = np.arange(10)**2 # the first 10 square numbers i = np.array( [ 1,1,4,9,5 ] ) # an array of indices a[i] # the elements of a at the positions i j = np.array( [ [ 3, 4], [ 9, 7 ] ] ) # a bi-dimensional array of indices a[j] # the same shape as j # When the indexed array `a` is multidimensional, a single array of indices refers to the first dimension of `a`: a = np.arange(15).reshape(5, 3) a slicer = np.array( [ [ 0, 1, 2, 0 ], [ 0, 3, 4, 0 ] ] ) a[slicer] # You can also give indexes for more than one dimension. But the arrays of indices for each dimension must have the same shape. a = np.arange(12).reshape(3,4) a # + i = np.array( [ [0,1], # the indices for the first dimension of a [1,2] ] ) j = np.array( [ [2,1], # the indices for the second dimension [3,3] ] ) a[i,j] # remember, i and j must have equal shape! # - # We could also have put `i` and `j` into a list and then sliced with that list: l = [i,j] a[l] # But we cannot put `i` and `j` into an array and then slice. That's because the array will be interpreted as indexing the first dimension of `a`. s = np.array( [i,j] ) a[s] # ### Indexing with Boolean Arrays # When we indexed an array with arrays of (integer) indices, we were providing the list of indices to pick. # # With boolean indices, we explicitly choose which items in the array we want and which ones we donโ€™t using a boolean expression. 
# # This is straightforward when the boolean array has the same shape as the original array: a = np.arange(12).reshape(3,4) b = a > 3 # using a boolean operator will return bool types b # b is an array of bools with a's shape # And now that we have an array of bools (`b`) equal in shape to array `a`, we can return a 1D array with the selected elements: a[b] # This trick can be useful when you want to make new assignments to an array: a[b] = 999 a # You can also supply a 1D boolean array for each dimension of the array you want to slice. a = np.arange(12).reshape(3,4) b1 = np.array([True,False,True]) # boolean array for first dimension of a b2 = np.array([True,False,True,False]) # boolean array for second dimension of a a a[b1] # selecting rows a[:,b2] # selecting columns # <a id='shape'></a> # ## Shape Manipulation # We've seen that an array has a shape given by the number of elements along each axis. We can change the shape of an array in a variety of ways without altering the original array. a = np.floor(10*np.random.random((4,5))) #another way to create an array, returning the floor of the input, elementwise a a.shape a.ravel() # returns a copy of the array, flattened a.reshape(10,2) # returns a copy of the array with a modified shape # If a dimension is given as -1 in a reshaping operation, the other dimensions are automatically calculated: a.reshape(20,-1) a.T # returns a transposed copy of the array # The above methods return [copies](https://docs.python.org/3.6/library/copy.html) of the array. You can use the `resize` method to modify the array itself: a a.resize(10,2) a # <a id='math'></a> # ## Array Math # Arrays enable you to execute batch operations on data without writing a `for` loop, like you'd have to do with a list. # # The simplest operation would be adding a [*scalar*](https://en.wikipedia.org/wiki/Scalar) (an individual quantity) to an array. In this case, the scalar's value is added to each element of the array. 
x = np.array([[1,2],[3,4]]) x + 1 # This process is called **vectorization**. It's the chief source of NumPy's efficiency gains over the built-in Python `list`. # # > *`Note that, so long as your arrays are of equal size, a vectorized operation will be applied elementwise. If the arrays are of different sizes, a process known as **broadcasting** is used (we'll see that later).`* # # For now, let's make two arrays of equal size and apply some basic operations. As usual, there's more than one way to skin a cat here. x = np.array([[1,2],[3,4]]) y = np.array([[5,6],[7,8]]) # > Addition x + y np.add(x, y) # > Subtraction x - y np.subtract(x, y) # > Multiplication x * y np.multiply(x, y) # > Division x / y np.divide(x, y) # > Square Root np.sqrt(x) # There are a lot of mathematical functions you can use with NumPy arrays. See the [documentation](https://docs.scipy.org/doc/numpy/reference/routines.math.html) to learn more. # # And see [this](https://docs.scipy.org/doc/numpy/reference/generated/numpy.vectorize.html) on how you can vectorize with your own functions. # <a id='broadcast'></a> # ## Broadcasting # Broadcasting allows NumPy to work with arrays of different shapes when performing arithmetic operations. This can make code more concise and faster. We use the term "broadcasting" because the smaller array is โ€œbroadcastโ€ across the larger array so that they have compatible shapes. # # We'll visualize how broadcasting works below. But first, let's consider the use-case and how we would achieve it using a `for` loop. # # Suppose that we wanted to add a constant vector `v` to each row of a matrix `m`. 
We could perform the operation with a loop like this: # + m = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) #create the matrix v = np.array([1, 0, 1]) #create the vector y = np.empty_like(m) #Create an empty matrix with the same shape as m # Use a loop to add v to each row of m for i in range(4): y[i, :] = m[i, :] + v y # - # Although a loop works, you'll notice a drag on performance when the matrix `m` is very large. That's because adding the vector `v` to each row of the matrix `m` is equivalent to forming a matrix of vertically stacked copies of `v` (`vv`) and *then* performing elementwise summation of `m` and `vv`. # # Broadcasting can perform this computation without actually creating stacked copies of `v`. What's more, by vectorizing your array operations, looping occurs in C instead of Python. This makes everything much faster! y = m + v # Add v to each row of m. Because of broadcasting, it doesnt matter that the shapes aren't the same. y # Thanks to broadcasting, `y = m + v` works even though `m` has shape (4, 3) and v has shape (3,). The trick is that, behind the scenes, NumPy treated `v` as if its shape were (4, 3). # # To get a better idea of this consider the following 3 examples. Each broadcasting operation is followed by an image that shows the elements of each array in **bold-bordered** cells. To visualize where the broadcasting is happening, I've added the lightly-shaded cells. np.arange(3) + 1 # <img src='assets/broadcast1.png' height = 650 width = 650> np.ones((3,3)) + np.arange(3) # <img src='assets/broadcast2.png' height = 650 width = 650> np.arange(3).reshape((3,1)) + np.arange(3) # <img src='assets/broadcast3.png' height = 650 width = 650> # To read more about broadcasting, check out the [documentation](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html). # <a id='applications'></a> # ## Applications # ### Need for Speed: When to use a NumPy array over a Python list. 
# In Python, lists are an efficient general-purpose container. `list` methods and operations - e.g. popping, appending, and concatenation - work well for most purposes. Plus they're easy to construct and manipulate using `for` loops or list comprehensions. # # Even so, lists have a few limitations: # - First, they donโ€™t support โ€œvectorizedโ€ operations like elementwise addition and multiplication. # - Second, since lists can contain objects of differnt types, Python has to store `type` information for every single element and then execute [type dispatching](https://en.wikipedia.org/wiki/Dynamic_dispatch) whenever an operation is performd on an element within a list. This can consume a lot of memory (and time). # # Since a NumPy array contains uniform values, you sacrifice flexibility for performance. When is this trade-off worth it? Whenever you can replace list loops/comprehensions with a vectorized operation. # # To see this in action, we'll do some vectorized addition on two lists of lists and then again with arrays. def calculate_time(expression): nitems = 1000000 narray = np.arange(nitems) larray = range(nitems) start = datetime.now() val = eval(expression) end = datetime.now() return "%d micro seconds %s" %((end-start).microseconds,expression) # + numpy_sum = "narray.sum()" list_sum = "sum(larray)" print("Numpy: {}".format(calculate_time(numpy_sum))) print("Lists: {}".format(calculate_time(list_sum))) # - # Although the above is a simple example, performance differences can grow quite steeply when you choose to work with a multidimensional array instead of a list of lists. # ### Manipulating Color Pallete Arrays # In this example, we'll see how you can use NumPy arrays to manipulate an image. 
# # First, let's import a plotting library: # + import matplotlib.pyplot as plt import matplotlib.image as mpimg #This turns on "inline" plotting, which will make plots appear in the notebook # %matplotlib inline # - img = mpimg.imread('assets/moon.png') plt.imshow(img) # Behind the scenes, this image is a NumPy array! Let's look at its shape and see how the matches some of the image info. img.shape # Here's some detail on the image from my command line: # # <img src = 'assets/image_info.png' height = 250 width = 250> # # Note how the shape corresponds to the pixel height, pixel width and samples per pixel. img.dtype # Also note the `dtype`: `float32`. Matplotlib has rescaled the 8 bit data from each sample to floating point numbers between 0.0 and 1.0. # In our image array, each inner array represents a pixel: 2,832 `x` 2,824 = 7,997,568 pixels. # # Each of these pixels then has 4 values tied to it. The first 3 are RGB (red, green, blue) values. Since our image is an RGB*A*, the fourth value is the A, which stands for the alpha/opacity channel. If a pixel has a value of 0 in its alpha channel, it is fully transparent (and, thus, invisible), whereas a value of 1 in the alpha channel gives a fully opaque pixel (traditional digital images). # # Let's look at an individual pixel to get an idea of what we're working with. Let's choose the pixel at width 1500 and height 1500. img[1500,1500,...] # Above, you can see: # - Red = 0.74117649 # - Green = 0.74117649 # - Blue = 0.74117649 # - Alpha = 1 # # Now that we understand our array's organization, let's applying some [pseudocolor](https://en.wikipedia.org/wiki/False_color). # # > Pseudocolor is a tool for enhancing contrast in single-channel, grayscale, luminosity images. Since R, G, and B are all similar (as we saw above), we can just pick one channel of our data: pseudo = img[:,:,1] # Now, with that we have 2D (no color) image, the default matplotlib colormap will be applied. The default is called viridis. 
It's greenish: plt.imshow(pseudo) plt.imshow(pseudo, cmap="hot") plt.imshow(pseudo, cmap="nipy_spectral")
Learn Python/3. Data Structures/2. Arrays (the numpy module)/Arrays.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from scipy.integrate import odeint import numpy as np import matplotlib.pyplot as plt # Our SEIRD model plus intensive care # + # describe the model def deriv(y, t, N, beta, gamma, delta, alpha, rho, my, z, daysiniva, ivaprob, ivaproblive): S, E, I, R, D, In, Ink, Inm, Dk, Dm, limit = y dSdt = -beta(t) * S * I / N + my * R dEdt = beta(t) * S * I / N - delta * E dIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * I - z * ivaprob * I dRdt = (1 - alpha) * gamma * I - my * R + daysiniva * ivaproblive * In dDdt = alpha * rho * I + daysiniva * (1 - ivaproblive) * In dIndt = z * ivaprob * I - daysiniva * (1 - ivaproblive) * In - daysiniva * ivaproblive * In dInkdt = dIndt * w dInmdt = dIndt * (1-w) dDkdt = dDdt * deadwomen dDmdt = dDdt * (1 - deadwomen) dlimitdt = 0 # Adding line for limit of intensive care seats return dSdt, dEdt, dIdt, dRdt, dDdt, dIndt, dInkdt, dInmdt, dDkdt, dDmdt, dlimitdt # - # We chose to use relevant data over the spreading of the decease to make a fundamental ond objective model. # # We have split Recovered into two different curves, one for persons who have recovered, R and one for people who have died, D. # To do that we used the percent for how many people that have died because of Covid in Sweden which was approximately 2% according to Folkhรคlsomyndigheten by the time we extracted the data. # # We added a group In, intensive care and used the percent 0.01 of all people who have been sick according to Folkhรคlsomyndigheten # Then we split this group In into two groups, women and men and we used the percent here as well to show the difference. 
# # We took the data for intensive care from this website https://experience.arcgis.com/experience/2dc63e26f509468f896ec69476b0dab3 # The data was downloaded on the 12 of January which might explain any slight differences to current data. # # + N = 10000000 # Approximately Swedens population gamma = 1.0 / 7.0 # A person is sick for approximately 7 days. rho = 1.0 / 12.0 #dying 9 days after first day sick delta = 1.0 / 5.0 # Incubation time alpha = 0.02 # 2% death rate my = 1.0 / 180.0 # 180 days to become susceptible again z = 1.0 / 10.7 # rate of days between sickness to intensive care https://www.icuregswe.org/data--resultat/covid-19-i-svensk-intensivvard/ ivaprob = 0.008 # probability of ending up on intensive care ivaproblive = 0.8 # 80% survival rate on IVA https://lakartidningen.se/aktuellt/nyheter/2020/05/for-tidigt-att-uttala-sig-om-overlevnad-i-intensivvarden/ daysiniva = 1/14 # two weeks #Percentage: w=0.288 #28.8 % women in intensive care deadwomen = 0.46 L1 = 60 # L is differnet timeperiod for different R0 L2 = 80 L3 = 200 def R_0(t): if t < L1: return 5.0 elif L1 < t < L2: return 2.0 elif L2 < t < L3: return 1.5 else: return 4.0 #R0 increases again and result in second wave def beta(t): return R_0(t) * gamma # R_0 depends of the time, how many persons one person infects, to se how the spread of the virus looks # if the government puts int harder restrictions, a lockdown after L days. S0, E0, I0, R0, D0, In0, Ink0, Inm0, Dk0, Dm0, limit0 = N-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 500 # initial conditions: one infected, rest susceptible # + t = np.linspace(0, 300, 1000) # Grid of time points (in days) y0 = S0, E0, I0, R0, D0, In0, Ink0, Inm0, Dk0, Dm0, limit0 # Initial conditions vector # Integrate the SEIRDIn equations over the time grid, t. 
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta, alpha, rho, my, z, daysiniva, ivaprob, ivaproblive))
S, E, I, R, D, In, Ink, Inm, Dk, Dm, limit = ret.T
# -

# +
def plotseird(t, S, E, I, R, D):
    """Plot the five SEIRD compartments over time on a single axis."""
    f, ax = plt.subplots(1, 1, figsize=(10, 4))
    ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
    ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
    ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
    ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
    ax.plot(t, D, 'k', alpha=0.7, linewidth=2, label='Deceased')
    ax.set_xlabel('Time (days)')
    ax.yaxis.set_tick_params(length=0)
    ax.xaxis.set_tick_params(length=0)
    ax.grid(b=True, which='major', c='w', lw=2, ls='-')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    plt.show()
# -

def plotiva(t, In, Ink, Inm, limit):
    """Plot the intensive-care compartments (total and by sex) against the ICU capacity limit."""
    f, ax = plt.subplots(1, 1, figsize=(10, 4))
    ax.plot(t, In, 'y', alpha=0.7, linewidth=2, label='Intensive care')
    ax.plot(t, Ink, 'g', alpha=0.7, linewidth=2, label='Intensive care women (28.8%)')
    ax.plot(t, Inm, 'c', alpha=0.7, linewidth=2, label='Intensive care men (71.2%)')
    ax.plot(t, limit, 'k', alpha=0.7, linewidth=2, label='Limit intensive care')
    ax.set_xlabel('Time (days)')
    ax.yaxis.set_tick_params(length=0)
    ax.xaxis.set_tick_params(length=0)
    ax.grid(b=True, which='major', c='w', lw=2, ls='-')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    plt.show()

def plotdead(t, dDkdt, dDmdt):
    """Plot cumulative deaths split by sex.

    Bug fix: the original body ignored these parameters and plotted the
    module-level globals ``Dk`` and ``Dm`` instead; it now plots the series
    the caller passes in. The existing call ``plotdead(t, Dk, Dm)`` is
    unchanged, so the output for that call is identical.
    """
    f, ax = plt.subplots(1, 1, figsize=(10, 4))
    ax.plot(t, dDkdt, 'g', alpha=0.7, linewidth=2, label='Death women (46%)')
    ax.plot(t, dDmdt, 'c', alpha=0.7, linewidth=2, label='Death men (54%)')
    ax.set_xlabel('Time (days)')
    ax.yaxis.set_tick_params(length=0)
    ax.xaxis.set_tick_params(length=0)
    ax.grid(b=True, which='major', c='w', lw=2, ls='-')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    plt.show()

# plot the graph

# +
plotseird(t, S, E, I, R, D)
plotiva(t, In, Ink, Inm, limit)
plotdead(t, Dk, Dm)
# -

# When we read about different
# data on Folkhälsomyndigheten we found it interesting that it was a big difference between
# women and men, 72% men and 28% women, who had been intensively cared for, so we chose to show this in a separate graph.
SEIRDIn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Clairvoyante (Visualization)
#
# NOTE(review): this notebook targets Python 2 (``print >> sys.stderr`` syntax)
# and relies on the project-local ``clairvoyante`` package plus a trained model
# checkpoint on disk; it will not run under Python 3 as-is. Bare names such as
# ``figure``, ``colorbar``, ``matshow``, ``cm``, ``matplotlib`` and ``math``
# come from the ``%pylab inline`` magic below.

# %matplotlib inline
# %pylab inline
import sys
sys.path.append('../')  # make the project-local clairvoyante package importable
import time
import numpy as np
import tensorflow as tf
#import clairvoyante
import clairvoyante.utils_v2 as utils
import clairvoyante.clairvoyante_v3 as cv
import clairvoyante.param as param

# ## Load a trained model

# Load a trained model into memory
m = cv.Clairvoyante()
m.init()
m.restoreParameters("../trainedModels/fullv3-illumina-novoalign-hg001-hg38/learningRate1e-3.epoch999.learningRate1e-4.epoch1499")

# ## Show number of parameters in the model

# Show the number of parameters of the model
r = tf.train.NewCheckpointReader("../trainedModels/fullv3-illumina-novoalign-hg001-hg38/learningRate1e-3.epoch999.learningRate1e-4.epoch1499")
p = r.get_variable_to_shape_map()
tot = 0
# Skip optimizer slot variables (Momentum/Adam/power) and the step counter so
# that only genuine model weights are counted.
for k, v in sorted(p.items(), key=lambda x: x[0]):
    if 'Momentum' not in k and\
       'global_step' not in k and\
       'Adam' not in k and\
       'power' not in k:
        tmp = np.prod(v)  # product of the variable's shape = element count
        tot += tmp
        print >> sys.stderr, '%s: %s => %d' % (k, str(v), tmp)
print >> sys.stderr, 'Total number of parameters: %d' % tot

# ## Load a dataset

total, XArrayCompressed, YArrayCompressed, posArrayCompressed = \
utils.GetTrainingArray("../training/tensor_can_chr22",
                       "../training/var_chr22",
                       "../training/bed",
                       shuffle = False)

# ## Show the tensors of a variant

# +
# Show how the four matrices are like
# (each channel appears to be positions x bases A/C/G/T, given the tick
# labels below -- confirm against utils.DecompressArray)
i = 1234
XArray, _, _ = utils.DecompressArray(XArrayCompressed, i, 1, total)
figure(figsize=(15, 8));
# Channel 0 is drawn on a 0..50 scale; channels 1-3 on a symmetric -50..50 scale.
plt.subplot(4,1,1); plt.xticks(np.arange(0, 33, 1)); plt.yticks(np.arange(0, 4, 1), ['A','C','G','T'])
plt.imshow(XArray[0,:,:,0].transpose(), vmin=0, vmax=50, interpolation="nearest", cmap=cm.hot); colorbar()
plt.subplot(4,1,2); plt.xticks(np.arange(0, 33, 1)); plt.yticks(np.arange(0, 4, 1), ['A','C','G','T'])
plt.imshow(XArray[0,:,:,1].transpose(), vmin=-50, vmax=50, interpolation="nearest", cmap=cm.bwr); colorbar()
plt.subplot(4,1,3); plt.xticks(np.arange(0, 33, 1)); plt.yticks(np.arange(0, 4, 1), ['A','C','G','T'])
plt.imshow(XArray[0,:,:,2].transpose(), vmin=-50, vmax=50, interpolation="nearest", cmap=cm.bwr); colorbar()
plt.subplot(4,1,4); plt.xticks(np.arange(0, 33, 1)); plt.yticks(np.arange(0, 4, 1), ['A','C','G','T'])
plt.imshow(XArray[0,:,:,3].transpose(), vmin=-50, vmax=50, interpolation="nearest", cmap=cm.bwr); colorbar()
# -

# ## Predict variants

# +
YArray, _, _ = utils.DecompressArray(YArrayCompressed, 0, total, total)
print >> sys.stderr, "Testing on the training and validation dataset ..."
predictStart = time.time()
predictBatchSize = param.predictBatchSize
# Run the model batch by batch over the whole dataset, collecting the four
# output heads: base change, zygosity, variant type and indel length.
datasetPtr = 0
XBatch, _, _ = utils.DecompressArray(XArrayCompressed, datasetPtr, predictBatchSize, total)
bases = []; zs = []; ts = []; ls = []
base, z, t, l = m.predict(XBatch)
bases.append(base); zs.append(z); ts.append(t); ls.append(l)
datasetPtr += predictBatchSize
while datasetPtr < total:
    XBatch, _, endFlag = utils.DecompressArray(XArrayCompressed, datasetPtr, predictBatchSize, total)
    base, z, t, l = m.predict(XBatch)
    bases.append(base); zs.append(z); ts.append(t); ls.append(l)
    datasetPtr += predictBatchSize
    if endFlag != 0:
        break
bases = np.concatenate(bases[:]); zs = np.concatenate(zs[:]); ts = np.concatenate(ts[:]); ls = np.concatenate(ls[:])
print >> sys.stderr, "Prediciton time elapsed: %.2f s" % (time.time() - predictStart)

# Evaluate the trained model
YArray, _, _ = utils.DecompressArray(YArrayCompressed, 0, total, total)
print >> sys.stderr, "Version 2 model, evaluation on base change:"
# Top-1 / top-2 accuracy of the base-change head (label columns 0-3).
allBaseCount = top1Count = top2Count = 0
for predictV, annotateV in zip(bases, YArray[:,0:4]):
    allBaseCount += 1
    sortPredictV = predictV.argsort()[::-1]
    if np.argmax(annotateV) == sortPredictV[0]:
        top1Count += 1; top2Count += 1
    elif np.argmax(annotateV) == sortPredictV[1]:
        top2Count += 1
print >> sys.stderr, "all/top1/top2/top1p/top2p: %d/%d/%d/%.2f/%.2f" %\
    (allBaseCount, top1Count, top2Count, float(top1Count)/allBaseCount*100, float(top2Count)/allBaseCount*100)

# Confusion matrices for the remaining heads: rows = truth, columns = prediction.
# NOTE(review): np.int is an alias of the builtin int and was removed in
# NumPy >= 1.24; use dtype=int if this notebook is ever modernized.
print >> sys.stderr, "Version 2 model, evaluation on Zygosity:"
ed = np.zeros( (2,2), dtype=np.int )
for predictV, annotateV in zip(zs, YArray[:,4:6]):
    ed[np.argmax(annotateV)][np.argmax(predictV)] += 1
for i in range(2):
    print >> sys.stderr, "\t".join([str(ed[i][j]) for j in range(2)])

print >> sys.stderr, "Version 2 model, evaluation on variant type:"
ed = np.zeros( (4,4), dtype=np.int )
for predictV, annotateV in zip(ts, YArray[:,6:10]):
    ed[np.argmax(annotateV)][np.argmax(predictV)] += 1
for i in range(4):
    print >> sys.stderr, "\t".join([str(ed[i][j]) for j in range(4)])

print >> sys.stderr, "Version 2 model, evaluation on indel length:"
ed = np.zeros( (6,6), dtype=np.int )
for predictV, annotateV in zip(ls, YArray[:,10:16]):
    ed[np.argmax(annotateV)][np.argmax(predictV)] += 1
for i in range(6):
    print >> sys.stderr, "\t".join([str(ed[i][j]) for j in range(6)])
# -

# ## Visualizing predictions

# Compare the output of the expected calls and the predicted calls
arrayStart = 0
arrayEnd = 100
figure(figsize=(20, 5))
matshow(YArray[arrayStart:arrayEnd,:].transpose(), fignum=0, cmap=cm.PuRd)
figure(figsize=(20, 5))
matshow(np.concatenate( (bases[arrayStart:arrayEnd,:],zs[arrayStart:arrayEnd,:],\
                         ts[arrayStart:arrayEnd,:],ls[arrayStart:arrayEnd,:]),1).transpose(), fignum=0, cmap=cm.PuRd)

# ## Visualizing Hidden Layers

# +
def GetActivations(layer, batchX):
    # Run a forward pass and return the activations of `layer` for `batchX`
    # (dropout and L2 regularization disabled for inference).
    # Version 2 network
    '''
    units = m.session.run(layer, feed_dict={m.XPH:batchX,
                                            m.phasePH:False,
                                            m.dropoutRatePH:0.0})
    '''
    # Version 3 network
    units = m.session.run(layer, feed_dict={m.XPH:batchX,
                                            m.phasePH:False,
                                            m.dropoutRateFC4PH:0.0,
                                            m.dropoutRateFC5PH:0.0,
                                            m.l2RegularizationLambdaPH:0.0})
    return units

def PlotFiltersConv(units, interval=1, xsize=18, ysize=20):
    # Tile each convolutional filter's 2D activation map in a grid of subplots.
    matplotlib.rc('ytick', labelsize=10)
    matplotlib.rc('xtick', labelsize=8)
    filters = units.shape[3]
    xlen = units.shape[2]
    plt.figure(1, figsize=(xsize,ysize))
    nColumns = 8
    # NOTE(review): under Python 2, filters / nColumns is integer division, so
    # math.ceil is a no-op here; the trailing +1 supplies the extra row.
    nRows = math.ceil(filters / nColumns) + 1
    for i in range(filters):
        plt.subplot(nRows, nColumns, i+1)
        plt.title('Filter ' + str(i))
        plt.xticks(np.arange(0, xlen, interval), ['A','C','G','T'])
        plt.imshow(units[0,:,:,i], interpolation="nearest", cmap=cm.Purples)

def PlotFiltersFC(units, interval=10, xsize=18, ysize=4):
    # Render a fully-connected layer's activations as a single heat-map row.
    matplotlib.rc('ytick', labelsize=10)
    matplotlib.rc('xtick', labelsize=8)
    plt.figure(1, figsize=(xsize,ysize))
    cell = units.shape[1]
    plt.xticks(np.arange(0, cell, interval))
    plt.yticks(np.arange(0, 1, 1), [''])
    plt.title(str(cell) + ' units')
    plt.imshow(np.reshape(units[0,:], (-1,cell)), interpolation="nearest", cmap=cm.Purples)
# -

# Load a tensor
i = 0
XArray, _, _ = utils.DecompressArray(XArrayCompressed, i, 1, total)

# Visualizing conv1
units = []
units = GetActivations(m.conv1, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersConv(units)

# Visualizing conv2
units = []
units = GetActivations(m.conv2, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersConv(units)

# Visualizing conv3
units = []
units = GetActivations(m.conv3, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersConv(units)

# Visualizing fc4
units = []
units = GetActivations(m.fc4, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units)

# Visualizing fc5
units = []
units = GetActivations(m.fc5, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units)

# Visualizing YBaseChangeSigmoid
units = []
units = GetActivations(m.YBaseChangeSigmoid, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units,2,1)

# Visualizing YZygositySoftmax
units = []
units = GetActivations(m.YZygositySoftmax, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units,1,1)

# Visualizing YVarTypeSoftmax
units = []
units = GetActivations(m.YVarTypeSoftmax, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units,2,1)

# Visualizing YIndelLengthSoftmax
units = []
units = GetActivations(m.YIndelLengthSoftmax, XArray)
print >> sys.stderr, "Hidden layer shape: ", units.shape
PlotFiltersFC(units,3,1)
jupyter_nb/visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science # ## Lab 1 - Introduction and Setup # # **Harvard University**<br> # **Spring 2020**<br> # **Instructors:** <NAME>, <NAME>, and <NAME><br> # **Lab Instructors:** <NAME> and <NAME><br> # **Contributors:** <NAME> and <NAME> # # --- ## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text HTML(styles) # + import numpy as np #import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # - # ## Learning Goals # # The purpose of this lab is to get you up to speed with what you will need to run the code for CS109b. # # ## 1. Getting Class Material # # ### Option 1A: Cloning the class repo and then copying the contents in a different directory so you can make changes. # # * Open the Terminal in your computer and go to the Directory where you want to clone the repo. Then run # # `git clone https://github.com/Harvard-IACS/2020-CS109B.git` # # * If you have already cloned the repo, go inside the '/2020-CS109B/' directory and run # # `git pull` # # * If you change the notebooks and then run `git pull` your changes will be overwritten. So create a `playground` folder and copy the folder with the notebook with which you want to work there. 
# # ### Option 1B: Forking the class repo # # To get access to the code used in class you will need to clone the class repo: [https://github.com/Harvard-IACS/2020-CS109B](https://github.com/Harvard-IACS/2020-CS109B) # # In order not to lose any changes you have made when updating the content (pulling) from the main repo, a good practice is to `fork` the repo locally. For more on this see <NAME>'s notes: [How to Fork a Repo](https://github.com/Harvard-IACS/2020-CS109B/raw/master/docs/labs/lab01/ForkRepo.pdf). NOTE: While Fork is a proper way to handle local changes, it doesn't magically solve everything -- if you edit a file that originated from our course repo (e.g., a HW notebook), and later pull from our 'upstream' repo again, any changes you make will require resolving `merge conflict(s)`. Thus, if you want to safetly and easily preserve any of your changes, we recommend renaming your files and/or copying them into an independent directory within your repo. # # You will need this year's repo: `https://github.com/Harvard-IACS/2020-CS109B.git` # ## 2. Running code: # # ## Option 2A: Managing Local Resources (supported by cs109b) # # ### Use Virtual Environments: I cannot stress this enough! # # Isolating your projects inside specific environments helps you manage dependencies and therefore keep your sanity. You can recover from mess-ups by simply deleting an environment. Sometimes certain installation of libraries conflict with one another. # # In order of isolation here is what you can do: a) set up a virtual environment, b) set up a virtual machine. # The two most popular tools for setting up environments are: # # - `conda` (a package and environment manager) # - `pip` (a Python package manager) with `virtualenv` (a tool for creating environments) # # We recommend using `conda` package installation and environments. `conda` installs packages from the Anaconda Repository and Anaconda Cloud, whereas `pip` installs packages from PyPI. 
Even if you are using `conda` as your primary package installer and are inside a `conda` environment, you can still use `pip install` for those rare packages that are not included in the `conda` ecosystem. # # See here for more details on how to manage [Conda Environments](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). # <div class='exercise'> <b> Exercise 1: Clone or Fork the CS109b git repository. Use the cs109b.yml file to create an environment:</div> # # ``` # $ cd /2020-CS109B/content/labs/lab01/ # $ conda env create -f cs109b.yml # $ conda activate cs109b # ``` # We have included the packages that you will need in the `cs109b.yml` file. It should be in the same directory as this notebook. # ## Option 2B: Using Cloud Resources (optional) # ### Using SEAS JupyterHub (supported by cs109b) # # [Instructions for Using SEAS JupyterHub](https://canvas.harvard.edu/courses/65462/pages/instructions-for-using-seas-jupyterhub) # # SEAS and FAS are providing you with a platform in AWS to use for the class, accessible via the 'JupyterHub' menu link in Canvas. Between now and March 1, each student will have their own t2.medium AWS ec2 instance with 4GB CPU RAM, and 2 vCPUs. After March 1st the instances will be upgraded to p2.xlarge AWS ec2 instances with a GPU, 61GB CPU RAM, 12GB GPU RAM, 10gB disk space, and 4 vCPUs. # # Most of the libraries such as keras, tensorflow, pandas, etc. are pre-installed. If a library is missing you may install it via the Terminal. # # **NOTE : The AWS platform is funded by SEAS and FAS for the purposes of the class. It is not running against your individual credit.
You are to use it with prudence; also it is not allowed to use it for purposes not related to this course.** # # **Help us keep this service: Make sure you stop your instance as soon as you do not need it.** # ### Using Google Colab (on your own) # # Google's Colab platform [https://colab.research.google.com/](https://colab.research.google.com/) offers a GPU enviromnent to test your ideas, it's fast, free, with the only caveat that your files persist only for 12 hours. The solution is to keep your files in a repository and just clone it each time you use Colab. # ### Using AWS in the Cloud (on your own) # # For those of you who want to have your own machines in the Cloud to run whatever you want, Amazon Web Services is a (paid) solution. For more see: [https://docs.aws.amazon.com/polly/latest/dg/setting-up.html](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html) # # Remember, AWS is a paid service so if you let your machine run for days you will get charged!<BR> # ![aws-dog](../images/aws-dog.jpeg) # # *source: maybe Stanford's cs231n via Medium* # ## 3. 
Ensuring everything is installed correctly # # ## Packages we will need for this class # # - **Clustering**: # - Sklearn - [https://scikit-learn.org/stable/](https://scikit-learn.org/stable/) # - scipy - [https://www.scipy.org](https://www.scipy.org) # - gap_statistic (by <NAME>) - [https://anaconda.org/milesgranger/gap-statistic/notebook](https://anaconda.org/milesgranger/gap-statistic/notebook) # # - **Smoothing**: # - statsmodels - [https://www.statsmodels.org/](https://www.statsmodels.org/)<br> # statsmodels examples: https://www.statsmodels.org/stable/examples/index.html#regression<BR> # - scipy # - pyGAM - [https://pygam.readthedocs.io/en/latest/](https://pygam.readthedocs.io/en/latest/) # # - **Bayes**: # - pymc3 - [https://docs.pymc.io](https://docs.pymc.io) # # - **Neural Networks**: # - keras - [https://www.tensorflow.org/guide/keras](https://www.tensorflow.org/guide/keras) # # # We will test that these packages load correctly in our environment. from sklearn import datasets iris = datasets.load_iris() digits = datasets.load_digits() digits.target # you should see [0, 1, 2, ..., 8, 9, 8] # + from scipy import misc import matplotlib.pyplot as plt face = misc.face() plt.imshow(face) plt.show() # you should see a racoon # + import statsmodels.api as sm import statsmodels.formula.api as smf # Load data dat = sm.datasets.get_rdataset("Guerry", "HistData").data dat.head() # + from pygam import PoissonGAM, s, te from pygam.datasets import chicago from mpl_toolkits.mplot3d import Axes3D X, y = chicago(return_X_y=True) gam = PoissonGAM(s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y) # + XX = gam.generate_X_grid(term=1, meshgrid=True) Z = gam.partial_dependence(term=1, X=XX, meshgrid=True) ax = plt.axes(projection='3d') ax.plot_surface(XX[0], XX[1], Z, cmap='viridis') # - import pymc3 as pm print('Running PyMC3 v{}'.format(pm.__version__)) # you should see 'Running on PyMC3 v3.8' # ## Plotting # ### `matplotlib` and `seaborn` # # - `matplotlib` # - [seaborn: 
statistical data visualization](https://seaborn.pydata.org/). `seaborn` works great with `pandas`. It can also be customized easily. Here is the basic `seaborn` tutorial: [Seaborn tutorial](https://seaborn.pydata.org/tutorial.html). # #### Plotting a function of 2 variables using contours # # In optimization, our objective function will often be a function of two or more variables. While it's hard to visualize a function of more than 3 variables, it's very informative to plot one of 2 variables. To do this we use contours. First we define the $x1$ and $x2$ variables and then construct their pairs using `meshgrid`. import seaborn as sn x1 = np.linspace(-0.1, 0.1, 50) x2 = np.linspace(-0.1, 0.1, 100) xx, yy = np.meshgrid(x1, x2) z = np.sqrt(xx**2+yy**2) plt.contour(x1,x2,z); # ## We will be using `tensorflow` and `keras` # # **[TensorFlow](https://www.tensorflow.org)** is a framework for representing complicated ML algorithms and executing them in any platform, from a phone to a distributed system using GPUs. Developed by Google Brain, TensorFlow is used very broadly today. # # **[Keras](https://keras.io/)**, is a high-level API used for fast prototyping, advanced research, and production. We will use `tf.keras` which is TensorFlow's implementation of the `keras` API. # <div class="exercise"><b>Exercise 2: Run the following cells to make sure you have the basic libraries to do deep learning</b></div> # + from __future__ import absolute_import, division, print_function, unicode_literals # TensorFlow and tf.keras import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import models from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from tensorflow.keras.regularizers import l2 tf.keras.backend.clear_session() # For easy reset of notebook state. print(tf.__version__) # You should see a >2.0.0 here! print(tf.keras.__version__) # - # Checking if our machine has NVIDIA GPUs. Mine does not.. 
# Check which NVIDIA GPUs TensorFlow can see.
# BUG FIX: tf.config.experimental_list_devices() was deprecated and then
# removed in TF 2.x; tf.config.list_physical_devices('GPU') is the supported
# replacement and returns only GPU devices (an empty list means no GPU).
hasGPU = tf.config.list_physical_devices('GPU')
print(f'My computer has the following GPUs: {hasGPU}')

# <div class="exercise"><b>DELIVERABLES</b></div>
#
# **Submit this notebook to Canvas with the output produced**. Describe below
# the environment in which you will be working, e.g. I have installed the
# environment needed locally and have tested all the code in this notebook
# OR/and I am using JupyterHub
#
# ---------------- your answer here
#
# I have installed the environment needed locally and have tested all the code
# in this notebook.
#
# -----------------
content/labs/lab01/notebook/cs109b_lab1_intro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
from traitlets import link
from ipywidgets import HBox
import bqplot.pyplot as plt
from ipysheet import sheet, cell, column

# Demo: a bqplot scatter chart kept in two-way sync with an ipysheet
# spreadsheet through traitlets links.
size = 18
scale = 100.
np.random.seed(0)

# Synthetic "price" series: a random walk over `size` days.
x_data = np.arange(size)
y_data = np.cumsum(np.random.randn(size) * scale)

fig = plt.figure()
axes_options = {'x': {'label': 'Date', 'tick_format': '%m/%d'},
                'y': {'label': 'Price', 'tick_format': '0.0f'}}
scatt = plt.scatter(x_data, y_data, colors=['red'], stroke='black')
fig.layout.width = '70%'

# A two-column sheet mirroring the scatter's x and y values.
sheet1 = sheet(rows=size, columns=2)
x_column = column(0, x_data)
y_column = column(1, y_data)

# Editing a cell moves the corresponding point, and vice versa.
link((scatt, 'x'), (x_column, 'value'))
link((scatt, 'y'), (y_column, 'value'))

# Display chart and sheet side by side.
HBox((fig, sheet1))
examples/bqplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # COUGH FEATURE ANALYSIS

# +
import pandas as pd
import numpy as np
import glob
import pickle

# Collect paths to the positive (COVID) heavy-cough recordings and their
# annotation JSONs.
pid_list = glob.glob('positiveaudio/*')
pwavefile_list = []
for ids in pid_list:
    pwavefile_list = pwavefile_list + [ids + '/cough-heavy.wav']

pjson_id = glob.glob('positive/*')
pjson_list = []
for ji in pjson_id:
    pjson_list = pjson_list + [ji + '/cough-heavy_v2.json']

# Same for the negative (non-COVID) recordings.
nid_list = glob.glob('negativeaudio/*')
nwavefile_list = []
for ids in nid_list:
    nwavefile_list = nwavefile_list + [ids + '/cough-heavy.wav']

njson_id = glob.glob('negative/*')
njson_list = []
for ji in njson_id:
    njson_list = njson_list + [ji + '/cough-heavy_v2.json']

with open('positiveids.txt', 'wb') as pi:
    pickle.dump(pid_list, pi)
with open('negativeids.txt', 'wb') as ri:
    pickle.dump(nid_list, ri)

# +
import os
import pandas as pd

# The combined (positive + negative) working set lives under 'both/'.
file_list = glob.glob('both/*')
id_list = []
for file in file_list:
    id_list = id_list + [os.path.basename(file)]

wavefile_list = []
for ids in id_list:
    wavefile_list = wavefile_list + ['bothaudio/' + ids + '/cough-heavy.wav']

json_list = []
for ji in id_list:
    json_list = json_list + ['both/' + ji + '/cough-heavy_v2.json']

with open('ids.txt', 'wb') as i:
    pickle.dump(id_list, i)

# +
import json as js

metadata = []
for ids in id_list:
    metadata = metadata + ['bothaudio/' + ids + '/metadata.json']

with open('meadatapaths.txt', 'wb') as md:
    pickle.dump(metadata, md)

df = pd.DataFrame(columns=['ID', 'gender', 'age', 'status', 'number of cycles',
                           'mean duration', 'avg time between cycles'])
df['ID'] = id_list

# Fill demographics and status from each subject's metadata JSON.
# NOTE: df.loc is used instead of chained indexing (df['col'][jl] = ...) which
# pandas may silently apply to a copy (SettingWithCopyWarning).
for jl in range(len(metadata)):
    with open(metadata[jl]) as jsonfile:
        data = js.load(jsonfile)
    df.loc[jl, 'gender'] = data['g']
    df.loc[jl, 'age'] = data['a']
    # Collapse the fine-grained labels into a binary covid / non_covid status.
    if data['covid_status'] in ('healthy',
                                'resp_illness_not_identified',
                                'no_resp_illness_exposed'):
        df.loc[jl, 'status'] = 'non_covid'
    else:
        df.loc[jl, 'status'] = 'covid'

df['status'].value_counts()
# -

# 100 cough files are chosen that are reasonably clean. 80 non-covid and 20
# covid cough samples are chosen. Four physical aspects of this data are
# introspected. They are:-
# 1) age
# 2) number of cough cycles per file (3 was asked of the patients, but there
#    has been variations).
# 3) Duration of each cough cycle in covid and non-covid
# 4) Average duration between cough cycles for each ID. (In rare cases wherein
#    only 1 cough bout has been detected, (such files being smaller than the
#    usual), it is presumed that the next cycle is just after the end of the
#    audio, and hence the average duration is
#    length_of_audio_file - end_boundary

import librosa

for jl in range(len(json_list)):
    with open(json_list[jl]) as jsonfile:
        data = js.load(jsonfile)
    # Drop non-boundary annotation fields; what remains are start_k/end_k pairs.
    del data['vol']
    del data['stage']
    del data['cont']
    del data['annotator_name']
    del data['quality']

    n_cycles = int(len(data) / 2)
    df.loc[jl, 'number of cycles'] = n_cycles

    # BUG FIX: the original updated avg as (avg + d)/2 on every iteration,
    # which yields an exponentially-weighted value (e.g. a single cycle of
    # duration d produced d/2), not the arithmetic mean. Compute a true mean.
    durations = [data['end_{}'.format(l + 1)] - data['start_{}'.format(l + 1)]
                 for l in range(n_cycles)]
    df.loc[jl, 'mean duration'] = (sum(durations) / n_cycles) if n_cycles else 0

    y, sr = librosa.load(wavefile_list[jl], sr=16000)
    dur = librosa.get_duration(y=y, sr=sr)

    if n_cycles > 1:
        # True mean of the gaps between consecutive cycles (same running-
        # average bug fixed here as well).
        gaps = [data['start_{}'.format(l + 2)] - data['end_{}'.format(l + 1)]
                for l in range(n_cycles - 1)]
        mean_time_between_cycles = sum(gaps) / len(gaps)
    else:
        # Single bout: assume the next cycle would start right after the end
        # of the recording. BUG FIX: the original referenced the stale loop
        # variable `l` here; use the explicit cycle count instead.
        mean_time_between_cycles = dur - data['end_{}'.format(n_cycles)]
    df.loc[jl, 'avg time between cycles'] = mean_time_between_cycles

df.to_csv('cough_params.csv')
df.head()

params = df.columns
params

# +
# distribution of number of cycles
import seaborn as sns
import matplotlib.pyplot as plt

sns.displot(data=df, x='number of cycles', kind="hist", hue='status')
plt.title('distribution of number of cycles')
plt.savefig('numcycles.png')
# -

# As we can see, most of the recorded data (61) have 3 cycles per file. But
# there are outliers towards 1 cycle and 12 cycles. The covid positive data in
# these 100 samples closely follows the non-covid distribution, having a
# considerable amount of data in 1/2 samples.

# +
# distribution
import seaborn as sns
import matplotlib.pyplot as plt

sns.stripplot(data=df, x='gender', y='mean duration', hue='status')
plt.title('mean duration of each cough cycle')
plt.savefig('meandurationofeachcycle.png')
# -

# The plot of mean duration of each cough cycle between male and female.

# +
# distribution
import seaborn as sns
import matplotlib.pyplot as plt

sns.jointplot(data=df, x='avg time between cycles', y='mean duration')
# plt.title('relation between mean duration of each cough cycle to avg time between them.')
# BUG FIX: this figure was previously saved to 'meandurationofeachcycle.png',
# silently overwriting the stripplot above; give it its own file name.
plt.savefig('meanduration_vs_avgtime.png')

# +
# Cast the collected columns to proper dtypes for plotting/statistics.
df['age'] = df.age.astype(int)
df['number of cycles'] = df["number of cycles"].astype(float)
df['mean duration'] = df["mean duration"].astype(float)
df['avg time between cycles'] = df["avg time between cycles"].astype(float)
df['status'] = df["status"].astype('str')
df['gender'] = df["gender"].astype('str')
df.dtypes

# +
# distribution
import seaborn as sns
import matplotlib.pyplot as plt

sns.jointplot(data=df, x='avg time between cycles', y='mean duration', hue='status')
plt.savefig('huestatus.png')

# +
# distribution
import seaborn as sns
import matplotlib.pyplot as plt

sns.jointplot(data=df, x='avg time between cycles', y='mean duration', hue='gender')
plt.savefig('huegender.png')
# -

# The above two plots show that there is some marginal correlation between the
# two variables that describe average time between cough cycles and mean
# duration between cough cycles

df.describe()

# The statistics of the basic parameters considered.
# Correlation heatmap of the four numeric cough parameters.
d = df[['age', 'number of cycles', 'mean duration', 'avg time between cycles']]
plt.figure(figsize=(8, 8))
sns.heatmap(d.corr(), annot=True)
plt.show()  # BUG FIX: the original cell ended with a bare `plt` expression

# From the correlation graph we can see that there is 22% positive correlation
# between mean duration of cycles and avg time between cycles. Also, there is a
# negative correlation of 25% between the number of cycles and mean duration of
# cycles, which is quite intuitive

sns.pairplot(df, kind="scatter", hue='status')

from scipy import stats
result = stats.ttest_rel(df['avg time between cycles'], df['mean duration'])
print(result)

# A paired t-test is performed. The degree of freedom here is 99. The p-value
# associated with the t-score is greater than 0.05, which implies the mean
# difference between the variables is not different from 0. The variables are
# statistically redundant, which is also proved by a small t-score, as they
# also have a reasonable amount of correlation (22%)

result = stats.ttest_rel(df['avg time between cycles'], df['number of cycles'])
print(result)

# A paired t-test is performed. The degree of freedom here is 99. The p-value
# associated with the t-score is much lesser than 0.05, which implies the mean
# difference between the variables is very much different from 0. The variables
# are statistically independent and relevant, with very low correlation.

result = stats.ttest_rel(df['mean duration'], df['number of cycles'])
print(result)

# Independent two-sample t-test: covid vs non-covid over 'mean duration'.
op1 = ['covid']
op2 = ['non_covid']
covid_d = df[df['status'].isin(op1)]['mean duration']
ncovid_d = df[df['status'].isin(op2)]['mean duration']
result = stats.ttest_ind(covid_d, ncovid_d)
print(result)

# Independent t-tests were performed for Covid and non-covid data over the
# mean-duration of cycles variable. A low t-score and high p-value of
# 0.82 >> 0.05 retains the null-hypothesis that both groups for this variable
# have a similar statistical distribution.
# Independent two-sample t-test: does 'avg time between cycles' differ between
# the covid and non_covid groups?
op1=['covid']
op2=['non_covid']
covid_d=df[df['status'].isin(op1)]['avg time between cycles']
ncovid_d=df[df['status'].isin(op2)]['avg time between cycles']
result = stats.ttest_ind(covid_d,ncovid_d)
print(result)

# Independent t-tests were performed for Covid and non-covid data over the
# avg-time-between-cycles variable (comment fixed: it previously said
# "mean-duration of cycles", copy-pasted from the preceding cell). A low
# t-score and high p-value of 0.34>>0.05, retains the null-hypothesis that both
# the groups for this variable have a similar statistical distribution.
# Probably the 100 files chosen out of more than 400 is giving these
# statistics. If done on a larger dataset, better values could be observed.
_notebooks/2020-10-14-Cough-statisticalanalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import urllib3

# Load the Instacart dataset tables.
train = pd.read_csv("order_products__train.csv")
train.head()

prior = pd.read_csv("order_products__prior.csv")
prior.head()

orders = pd.read_csv("orders.csv")
orders.head()

products = pd.read_csv("products.csv")
products.head()

departments = pd.read_csv("departments.csv")
departments.head()

aisles = pd.read_csv("aisles.csv")
aisles.head()

# +
# On an average, about 59% of the products in an order are re-ordered products.

def count(x):
    """Group-aggregation helper: number of rows in the group."""
    return len(x)

# percentage of re-orders in prior set #
prior_reorder = prior["reordered"].sum() / prior.shape[0]
print(prior_reorder)

# percentage of re-orders in train set #
train_reorder = train["reordered"].sum() / train.shape[0]
print(train_reorder)
# -

# Share of orders with at least one re-ordered product (clip sums to 0/1).
# BUG FIX: use .loc with a row mask and column label instead of chained
# assignment, which pandas may apply to a temporary copy.
prior_no_reorder = prior.groupby(["order_id"]).reordered.aggregate("sum").reset_index()
prior_no_reorder.loc[prior_no_reorder.reordered > 1, 'reordered'] = 1
print(prior_no_reorder.reordered.value_counts() / prior_no_reorder.shape[0])
print('\n')

train_no_reorder = train.groupby(["order_id"]).reordered.aggregate("sum").reset_index()
train_no_reorder.loc[train_no_reorder.reordered > 1, 'reordered'] = 1
print(train_no_reorder.reordered.value_counts() / train_no_reorder.shape[0])

# +
# Distribution of basket sizes (max add_to_cart_order per order).
cart = train.groupby(["order_id"]).add_to_cart_order.aggregate(np.max).reset_index()
yaxis = cart.groupby(["add_to_cart_order"]).add_to_cart_order.aggregate(count)
# BUG FIX: the x values must be the actual basket sizes (the group index),
# not positions 0..k-1; the original shifted every bar left by at least one
# (basket sizes start at 1) and more wherever a size was missing.
xaxis = yaxis.index.tolist()

plt.figure(figsize=(10, 6))
plt.bar(xaxis, yaxis.tolist())
plt.ylabel('Number of Occurrences', fontsize=15)
plt.xlabel('Number of products in the given order', fontsize=15)
plt.xlim(0, 80)
plt.ylim(0, 10000)
plt.show()
# -

# It is a right tailed distribution and we can see that the max value at 5

# Enrich the prior orders with product and aisle information.
new_prior = pd.merge(prior, products, on='product_id', how='left')
new_prior.head()

new_prior = pd.merge(new_prior, aisles, on='aisle_id', how='left')
new_prior.head()

prod_freq = new_prior['product_name'].value_counts().reset_index().head(10)
prod_freq.columns = ['product_name', 'frequency_count']
prod_freq

# we can see that banana is the product which is mostly purchased. And we can
# also see that most of them are organic products and they are either fruits
# or vegetables.

aisl_freq = new_prior['aisle'].value_counts().reset_index().head(10)
aisl_freq.columns = ["aisle", "frequency"]
aisl_freq

# As mentioned in the above inference, that fruits and vegetables are the most
# purchased, is supported here, by this result.

# Based on previous section
plt.figure(figsize=(10, 6))
# BUG FIX: label the bars with the aisle names; the original plotted against
# the integer index 0..9, which made the vertical tick rotation pointless.
plt.bar(aisl_freq["aisle"], aisl_freq["frequency"])
plt.ylabel("Number of Occurrences", fontsize=15)
plt.xlabel("Aisle", fontsize=15)
plt.title("No of Occurences of Top 10 Aisles", fontsize=20)
plt.xticks(rotation="vertical")
plt.show()
exploratory_analysis2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pricing Asian Barrier Spreads # ### Introduction # <br> # An Asian barrier spread is a combination of 3 different option types, and as such, combines multiple possible features that the Qiskit Finance option pricing framework supports: # # - [Asian option](https://www.investopedia.com/terms/a/asianoption.asp): The payoff depends on the average price over the considered time horizon. # - [Barrier Option](https://www.investopedia.com/terms/b/barrieroption.asp): The payoff is zero if a certain threshold is exceeded at any time within the considered time horizon. # - [(Bull) Spread](https://www.investopedia.com/terms/b/bullspread.asp): The payoff follows a piecewise linear function (depending on the average price) starting at zero, increasing linear, staying constant. # # Suppose strike prices $K_1 < K_2$ and time periods $t=1,2$, with corresponding spot prices $(S_1, S_2)$ following a given multivariate distribution (e.g. generated by some stochastic process), and a barrier threshold $B>0$. # The corresponding payoff function is defined as # # # $$ # P(S_1, S_2) = # \begin{cases} # \min\left\{\max\left\{\frac{1}{2}(S_1 + S_2) - K_1, 0\right\}, K_2 - K_1\right\}, & \text{ if } S_1, S_2 \leq B \\ # 0, & \text{otherwise.} # \end{cases} # $$ # # # In the following, a quantum algorithm based on amplitude estimation is used to estimate the expected payoff, i.e., the fair price before discounting, for the option # # # $$\mathbb{E}\left[ P(S_1, S_2) \right].$$ # # # The approximation of the objective function and a general introduction to option pricing and risk analysis on quantum computers are given in the following papers: # # - [Quantum Risk Analysis. <NAME>. 2018.](https://arxiv.org/abs/1806.06893) # - [Option Pricing using Quantum Computers. 
Stamatopoulos et al. 2019.](https://arxiv.org/abs/1905.02666) # + import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.interpolate import griddata # %matplotlib inline import numpy as np from qiskit import QuantumRegister, QuantumCircuit, Aer, execute, AncillaRegister, transpile from qiskit.circuit.library import IntegerComparator, WeightedAdder, LinearAmplitudeFunction from qiskit.utils import QuantumInstance from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem from qiskit_finance.circuit.library import LogNormalDistribution # - # ### Uncertainty Model # # We construct a circuit factory to load a multivariate log-normal random distribution into a quantum state on $n$ qubits. # For every dimension $j = 1,\ldots,d$, the distribution is truncated to a given interval $[\text{low}_j, \text{high}_j]$ and discretized using $2^{n_j}$ grid points, where $n_j$ denotes the number of qubits used to represent dimension $j$, i.e., $n_1+\ldots+n_d = n$. # The unitary operator corresponding to the circuit factory implements the following: # # $$\big|0\rangle_{n} \mapsto \big|\psi\rangle_{n} = \sum_{i_1,\ldots,i_d} \sqrt{p_{i_1\ldots i_d}}\big|i_1\rangle_{n_1}\ldots\big|i_d\rangle_{n_d},$$ # # where $p_{i_1\ldots i_d}$ denote the probabilities corresponding to the truncated and discretized distribution and where $i_j$ is mapped to the right interval using the affine map: # # $$ \{0, \ldots, 2^{n_j}-1\} \ni i_j \mapsto \frac{\text{high}_j - \text{low}_j}{2^{n_j} - 1} * i_j + \text{low}_j \in [\text{low}_j, \text{high}_j].$$ # # For simplicity, we assume both stock prices are independent and identically distributed. # This assumption just simplifies the parametrization below and can be easily relaxed to more complex and also correlated multivariate distributions. # The only important assumption for the current implementation is that the discretization grid of the different dimensions has the same step size. 
# +
# number of qubits per dimension to represent the uncertainty
num_uncertainty_qubits = 2

# parameters for considered random distribution
S = 2.0       # initial spot price
vol = 0.4     # volatility of 40%
r = 0.05      # annual interest rate of 5% (BUG FIX: comment said 4%, value is 0.05)
T = 40 / 365  # 40 days to maturity

# resulting parameters for log-normal distribution
mu = ((r - 0.5 * vol**2) * T + np.log(S))
sigma = vol * np.sqrt(T)
mean = np.exp(mu + sigma**2/2)
variance = (np.exp(sigma**2) - 1) * np.exp(2*mu + sigma**2)
stddev = np.sqrt(variance)

# lowest and highest value considered for the spot price; in between, an
# equidistant discretization is considered.
low = np.maximum(0, mean - 3*stddev)
high = mean + 3*stddev

# map to higher dimensional distribution
# for simplicity assuming dimensions are independent and identically distributed)
dimension = 2
num_qubits = [num_uncertainty_qubits]*dimension
low = low*np.ones(dimension)
high = high*np.ones(dimension)
mu = mu*np.ones(dimension)
cov = sigma**2*np.eye(dimension)

# construct circuit factory
u = LogNormalDistribution(num_qubits=num_qubits, mu=mu, sigma=cov, bounds=(list(zip(low, high))))
# -

# plot PDF of uncertainty model
x = [v[0] for v in u.values]
y = [v[1] for v in u.values]
z = u.probabilities
#z = map(float, z)
#z = list(map(float, z))
resolution = np.array([2**n for n in num_qubits])*1j
grid_x, grid_y = np.mgrid[min(x):max(x):resolution[0], min(y):max(y):resolution[1]]
grid_z = griddata((x, y), z, (grid_x, grid_y))
fig = plt.figure(figsize=(10, 8))
# BUG FIX: Figure.gca(projection='3d') was deprecated and removed in
# Matplotlib 3.6; create the 3D axes explicitly instead.
ax = fig.add_subplot(projection='3d')
ax.plot_surface(grid_x, grid_y, grid_z, cmap=plt.cm.Spectral)
ax.set_xlabel('Spot Price $S_1$ (\$)', size=15)
ax.set_ylabel('Spot Price $S_2$ (\$)', size=15)
ax.set_zlabel('Probability (\%)', size=15)
plt.show()

# ### Payoff Function
#
# For simplicity, we consider the sum of the spot prices instead of their average.
# The result can be transformed to the average by just dividing it by 2.
# # The payoff function equals zero as long as the sum of the spot prices $(S_1 + S_2)$ is less than the strike price $K_1$ and then increases linearly until the sum of the spot prices reaches $K_2$. # Then payoff stays constant to $K_2 - K_1$ unless any of the two spot prices exceeds the barrier threshold $B$, then the payoff goes immediately down to zero. # The implementation first uses a weighted sum operator to compute the sum of the spot prices into an ancilla register, and then uses a comparator, that flips an ancilla qubit from $\big|0\rangle$ to $\big|1\rangle$ if $(S_1 + S_2) \geq K_1$ and another comparator/ancilla to capture the case that $(S_1 + S_2) \geq K_2$. # These ancillas are used to control the linear part of the payoff function. # # In addition, we add another ancilla variable for each time step and use additional comparators to check whether $S_1$, respectively $S_2$, exceed the barrier threshold $B$. The payoff function is only applied if $S_1, S_2 \leq B$. # # The linear part itself is approximated as follows. # We exploit the fact that $\sin^2(y + \pi/4) \approx y + 1/2$ for small $|y|$. # Thus, for a given approximation scaling factor $c_\text{approx} \in [0, 1]$ and $x \in [0, 1]$ we consider # # $$ \sin^2( \pi/2 * c_\text{approx} * ( x - 1/2 ) + \pi/4) \approx \pi/2 * c_\text{approx} * ( x - 1/2 ) + 1/2 $$ for small $c_\text{approx}$. # # We can easily construct an operator that acts as # # $$\big|x\rangle \big|0\rangle \mapsto \big|x\rangle \left( \cos(a*x+b) \big|0\rangle + \sin(a*x+b) \big|1\rangle \right),$$ # # using controlled Y-rotations. # # Eventually, we are interested in the probability of measuring $\big|1\rangle$ in the last qubit, which corresponds to # $\sin^2(a*x+b)$. # Together with the approximation above, this allows to approximate the values of interest. # The smaller we choose $c_\text{approx}$, the better the approximation. 
# However, since we are then estimating a property scaled by $c_\text{approx}$, the number of evaluation qubits $m$ needs to be adjusted accordingly. # # For more details on the approximation, we refer to: # [Quantum Risk Analysis. <NAME>. 2018.](https://arxiv.org/abs/1806.06893) # # Since the weighted sum operator (in its current implementation) can only sum up integers, we need to map from the original ranges to the representable range to estimate the result, and reverse this mapping before interpreting the result. The mapping essentially corresponds to the affine mapping described in the context of the uncertainty model above. # + # determine number of qubits required to represent total loss weights = [] for n in num_qubits: for i in range(n): weights += [2**i] # create aggregation circuit agg = WeightedAdder(sum(num_qubits), weights) n_s = agg.num_sum_qubits n_aux = agg.num_qubits - n_s - agg.num_state_qubits # number of additional qubits # + # set the strike price (should be within the low and the high value of the uncertainty) strike_price_1 = 3 strike_price_2 = 4 # set the barrier threshold barrier = 2.5 # map strike prices and barrier threshold from [low, high] to {0, ..., 2^n-1} max_value = 2**n_s - 1 low_ = low[0] high_ = high[0] mapped_strike_price_1 = (strike_price_1 - dimension*low_) / (high_ - low_) * (2**num_uncertainty_qubits - 1) mapped_strike_price_2 = (strike_price_2 - dimension*low_) / (high_ - low_) * (2**num_uncertainty_qubits - 1) mapped_barrier = (barrier - low) / (high - low) * (2**num_uncertainty_qubits - 1) # - # condition and condition result conditions = [] barrier_thresholds = [2]*dimension n_aux_conditions = 0 for i in range(dimension): # target dimension of random distribution and corresponding condition (which is required to be True) comparator = IntegerComparator(num_qubits[i], mapped_barrier[i] + 1, geq=False) n_aux_conditions = max(n_aux_conditions, comparator.num_ancillas) conditions += [comparator] # + # set the approximation 
scaling for the payoff function c_approx = 0.25 # setup piecewise linear objective fcuntion breakpoints = [0, mapped_strike_price_1, mapped_strike_price_2] slopes = [0, 1, 0] offsets = [0, 0, mapped_strike_price_2 - mapped_strike_price_1] f_min = 0 f_max = mapped_strike_price_2 - mapped_strike_price_1 objective = LinearAmplitudeFunction( n_s, slopes, offsets, domain=(0, max_value), image=(f_min, f_max), rescaling_factor=c_approx, breakpoints=breakpoints ) # + # define overall multivariate problem qr_state = QuantumRegister(u.num_qubits, 'state') # to load the probability distribution qr_obj = QuantumRegister(1, 'obj') # to encode the function values ar_sum = AncillaRegister(n_s, 'sum') # number of qubits used to encode the sum ar_cond = AncillaRegister(len(conditions) + 1, 'conditions') ar = AncillaRegister(max(n_aux, n_aux_conditions, objective.num_ancillas), 'work') # additional qubits objective_index = u.num_qubits # define the circuit asian_barrier_spread = QuantumCircuit(qr_state, qr_obj, ar_cond, ar_sum, ar) # load the probability distribution asian_barrier_spread.append(u, qr_state) # apply the conditions for i, cond in enumerate(conditions): state_qubits = qr_state[(num_uncertainty_qubits * i):(num_uncertainty_qubits * (i + 1))] asian_barrier_spread.append(cond, state_qubits + [ar_cond[i]] + ar[:cond.num_ancillas]) # aggregate the conditions on a single qubit asian_barrier_spread.mcx(ar_cond[:-1], ar_cond[-1]) # apply the aggregation function controlled on the condition asian_barrier_spread.append(agg.control(), [ar_cond[-1]] + qr_state[:] + ar_sum[:] + ar[:n_aux]) # apply the payoff function asian_barrier_spread.append(objective, ar_sum[:] + qr_obj[:] + ar[:objective.num_ancillas]) # uncompute the aggregation asian_barrier_spread.append(agg.inverse().control(), [ar_cond[-1]] + qr_state[:] + ar_sum[:] + ar[:n_aux]) # uncompute the conditions asian_barrier_spread.mcx(ar_cond[:-1], ar_cond[-1]) for j, cond in enumerate(reversed(conditions)): i = 
len(conditions) - j - 1 state_qubits = qr_state[(num_uncertainty_qubits * i):(num_uncertainty_qubits * (i + 1))] asian_barrier_spread.append(cond.inverse(), state_qubits + [ar_cond[i]] + ar[:cond.num_ancillas]) print(asian_barrier_spread.draw()) print('objective qubit index', objective_index) # - # plot exact payoff function plt.figure(figsize=(7,5)) x = np.linspace(sum(low), sum(high)) y = (x <= 5)*np.minimum(np.maximum(0, x - strike_price_1), strike_price_2 - strike_price_1) plt.plot(x, y, 'r-') plt.grid() plt.title('Payoff Function (for $S_1 = S_2$)', size=15) plt.xlabel('Sum of Spot Prices ($S_1 + S_2)$', size=15) plt.ylabel('Payoff', size=15) plt.xticks(size=15, rotation=90) plt.yticks(size=15) plt.show() # + tags=["nbsphinx-thumbnail"] # plot contour of payoff function with respect to both time steps, including barrier plt.figure(figsize=(7,5)) z = np.zeros((17, 17)) x = np.linspace(low[0], high[0], 17) y = np.linspace(low[1], high[1], 17) for i, x_ in enumerate(x): for j, y_ in enumerate(y): z[i, j] = np.minimum(np.maximum(0, x_ + y_ - strike_price_1), strike_price_2 - strike_price_1) if x_ > barrier or y_ > barrier: z[i, j] = 0 plt.title('Payoff Function', size=15) plt.contourf(x, y, z) plt.colorbar() plt.xlabel('Spot Price $S_1$', size=15) plt.ylabel('Spot Price $S_2$', size=15) plt.xticks(size=15) plt.yticks(size=15) plt.show() # - # evaluate exact expected value sum_values = np.sum(u.values, axis=1) payoff = np.minimum(np.maximum(sum_values - strike_price_1, 0), strike_price_2 - strike_price_1) leq_barrier = [ np.max(v) <= barrier for v in u.values ] exact_value = np.dot(u.probabilities[leq_barrier], payoff[leq_barrier]) print('exact expected value:\t%.4f' % exact_value) # ### Evaluate Expected Payoff # # We first verify the quantum circuit by simulating it and analyzing the resulting probability to measure the $|1\rangle$ state in the objective qubit. 
num_state_qubits = asian_barrier_spread.num_qubits - asian_barrier_spread.num_ancillas
print('state qubits: ', num_state_qubits)
transpiled = transpile(asian_barrier_spread, basis_gates=['u', 'cx'])
print('circuit width:', transpiled.width())
print('circuit depth:', transpiled.depth())

# Simulate the circuit once and inspect the exact statevector (no sampling noise).
job = execute(asian_barrier_spread, backend=Aer.get_backend('statevector_simulator'))

# +
# evaluate resulting statevector: sum the probability of every basis state whose
# objective qubit (leading bit of the truncated state-qubit label) reads |1>.
value = 0
for i, a in enumerate(job.result().get_statevector()):
    # zero-padded binary label of basis state i, truncated to the state qubits
    b = ('{0:0%sb}' % num_state_qubits).format(i)[-num_state_qubits:]
    prob = np.abs(a)**2
    if prob > 1e-4 and b[0] == '1':
        value += prob
    # all other states should have zero probability due to ancilla qubits
    if i > 2**num_state_qubits:
        break

# map value to original range: post_processing undoes the c_approx rescaling,
# then the affine map from the integer grid {0,...,2^n-1} back to [low, high]
# is inverted.
mapped_value = objective.post_processing(value) / (2**num_uncertainty_qubits - 1) * (high_ - low_)
print('Exact Operator Value: %.4f' % value)
print('Mapped Operator value: %.4f' % mapped_value)
print('Exact Expected Payoff: %.4f' % exact_value)
# -

# Next we use amplitude estimation to estimate the expected payoff.
# Note that this can take a while since we are simulating a large number of qubits. The way we designed the operator (asian_barrier_spread) implies that the number of actual state qubits is significantly smaller, thus, helping to reduce the overall simulation time a bit.
# + # set target precision and confidence level epsilon = 0.01 alpha = 0.05 qi = QuantumInstance(Aer.get_backend('aer_simulator'), shots=100) problem = EstimationProblem(state_preparation=asian_barrier_spread, objective_qubits=[objective_index], post_processing=objective.post_processing) # construct amplitude estimation ae = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi) # - result = ae.estimate(problem) conf_int = np.array(result.confidence_interval_processed) / (2**num_uncertainty_qubits - 1) * (high_ - low_) print('Exact value: \t%.4f' % exact_value) print('Estimated value:\t%.4f' % (result.estimation_processed / (2**num_uncertainty_qubits - 1) * (high_ - low_))) print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int)) import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
docs/tutorials/07_asian_barrier_spread_pricing.ipynb
# # Cars data cleanup
#
# Databricks notebook: reads the raw cars JSON tables from Azure Blob Storage,
# denormalizes them into a single flat table, and writes it back as CSV.

dbutils.widgets.text("storage_account_name", "")
dbutils.widgets.text("storage_account_key", "")

import pyspark.sql.functions as F
from os.path import splitext

# +
# Read the storage credentials from the notebook widgets and give Spark
# access to the "carsdata" blob container.
storage_account_name = dbutils.widgets.get("storage_account_name")
storage_account_key = dbutils.widgets.get("storage_account_key")

blob_path_root = f"wasbs://carsdata@{storage_account_name}.blob.core.windows.net/"
spark.conf.set(f"fs.azure.account.key.{storage_account_name}.blob.core.windows.net", storage_account_key)


# +
def get_file_url(file_name):
    """Return the full wasbs:// URL for a file in the cars container."""
    # blob_path_root already ends with "/"; the original inserted a second
    # slash here ("...net//Make.json"), creating an empty path segment.
    return f"{blob_path_root}{file_name}"


def write_csv(data_frame, file_name):
    """Write data_frame to the container as a headered CSV, replacing any previous output."""
    data_frame.write.format("csv").mode("overwrite").option("header", "true").save(get_file_url(file_name))


# +
# Load every source JSON file into a dict keyed by the lower-cased base name,
# e.g. "MakeModel.json" -> data_frames["makemodel"].
files = ["Make.json", "MakeModel.json", "Option.json", "OptionValue.json",
         "Series.json", "Specification.json", "SpecificationValue.json",
         "Trim.json", "Generation.json"]
data_frames = {}
get_key = lambda file_name: splitext(file_name)[0].lower()

for file in files:
    df = spark.read.json(get_file_url(file))
    data_frames.update({get_key(file): df})
# -

makes_df = data_frames["make"]
models_df = data_frames["makemodel"]
options_df = data_frames["option"]
optionvalues_df = data_frames["optionvalue"]
series_df = data_frames["series"]
specs_df = data_frames["specification"]
specvalues_df = data_frames["specificationvalue"]
trims_df = data_frames["trim"]
generations_df = data_frames["generation"]

# Specifications form a parent/child hierarchy; self-join so each child
# specification carries its parent's name as its category.
specs_with_parent_names_df = (
    specs_df.alias("df1")
    .join(specs_df.alias("df2"), F.col("df1.id") == F.col("df2.parent_id"), "inner")
    .select(
        F.col("df2.id").alias("specification_id"),
        F.col("df2.name").alias("specification_name"),
        F.col("df1.name").alias("specification_category"))
)

# Attach each specification's concrete value and unit to its trim.
specs_with_values_df = (specvalues_df
    .join(specs_with_parent_names_df, on = ['specification_id'], how = 'inner')
    .select(
        F.col("trim_id"),
        F.col("specification_name"),
        F.col("value").alias("specification_value"),
        F.col("unit").alias("specification_unit"),
        F.col("specification_category")))

trims_df = (trims_df
    .withColumnRenamed("id", "trim_id")
    .withColumnRenamed("name", "trim_name"))

trim_specs_df = trims_df.join(specs_with_values_df, ["trim_id"], "inner")

# NOTE(review): "mode_id" looks like a typo (DataFrame.drop silently ignores
# columns that do not exist), and dropping "model_id" here while a later join
# uses "model_id" relies on the column surviving in one of the joined frames —
# confirm against the source table schemas.
series_df = (series_df.withColumnRenamed("id", "series_id")
    .withColumnRenamed("name", "series_name")
    .join(trim_specs_df, ["series_id"], "inner")
    .drop("mode_id", "trim_id", "model_id")
)

generations_df = (generations_df
    .withColumnRenamed("name", "generation_name")
    .withColumnRenamed("id", "generation_id")
    .withColumnRenamed("year_end", "generation_year_end")
    .withColumnRenamed("year_start", "generation_year_start"))

series_generations_df = series_df.join(generations_df, ["generation_id"], "inner")

models_df = models_df.withColumnRenamed("name", "model_name")

model_details_df = (models_df.join(series_generations_df, "model_id", "inner")
    .drop("model_id")
    .drop("generation_id")
    .drop("series_id")
    .withColumnRenamed("year_production_end", "trim_year_production_end")
    .withColumnRenamed("year_production_start", "trim_year_production_start")
)

makes_df = makes_df.withColumnRenamed("name", "make_name").withColumnRenamed("id", "make_id")

make_model_details_df = makes_df.join(model_details_df, "make_id", "inner").drop("make_id")

cars_df = make_model_details_df

count1 = cars_df.count()

write_csv(cars_df, "cars_complete_csv")

# Round-trip sanity check: re-read the CSV we just wrote and compare row
# counts against the expected total.
test_df = spark.read.csv(get_file_url("cars_complete_csv"), header = True)
count2 = test_df.count()

count1 == count2 == 2025118
datacleanup/cleanup_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Orchestrating TFX pipelines on Google Cloud with Vertex Pipelines # ## Learning objectives # 1. Use the TFX CLI to build a TFX pipeline container. # 2. Deploy a TFX pipeline container to Vertex Pipelines on Google Cloud. # 3. Create and monitor a TFX pipeline run using the Vertex Pipelines UI. # In this lab, you will utilize the following tools and Google Cloud services to build a TFX pipeline that orchestrates the training and deployment of a TensorFlow classifier to predict forest cover type from tabular cartographic data: # # * The [**TFX CLI**](https://www.tensorflow.org/tfx/guide/cli) utility to build and deploy a TFX pipeline. # * [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) for TFX pipeline orchestration. # * [**Dataflow**](https://cloud.google.com/dataflow) for scalable, distributed data processing for TFX Beam-based components. # * A [**Vertex Training**](https://cloud.google.com/ai-platform/) job for model training and flock management of tuning trials. # * [**Vertex Prediction**](https://cloud.google.com/ai-platform/), a model server destination for blessed pipeline model versions. # * [**CloudTuner**](https://www.tensorflow.org/tfx/guide/tuner#tuning_on_google_cloud_platform_gcp) (KerasTuner implementation) and [**Vertex Vizier**](https://cloud.google.com/ai-platform/optimizer/docs/overview) for advanced model hyperparameter tuning. # ## Setup # ### Define constants # Add required libraries to Python PATH. 
# PATH=%env PATH # %env PATH={PATH}:/home/jupyter/.local/bin # + PROJECT_ID = !(gcloud config get-value core/project) PROJECT_ID = PROJECT_ID[0] PROJECT_NUMBER= !$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)") REGION = 'us-central1' # !echo {PROJECT_ID} # !echo {PROJECT_NUMBER} # !echo {REGION} # - # ### Configure service accounts for your project for Vertex Pipelines # ! gcloud services enable \ # compute.googleapis.com \ # iam.googleapis.com \ # cloudbuild.googleapis.com \ # container.googleapis.com \ # notebooks.googleapis.com \ # aiplatform.googleapis.com \ # dataflow.googleapis.com \ # bigquery.googleapis.com \ # bigquerydatatransfer.googleapis.com \ # artifactregistry.googleapis.com \ # cloudresourcemanager.googleapis.com \ # cloudtrace.googleapis.com \ # iamcredentials.googleapis.com \ # monitoring.googleapis.com \ # logging.googleapis.com SERVICE_ACCOUNT_ID=tfx-vertex-pipelines-sa gcloud iam service-accounts create $SERVICE_ACCOUNT_ID \ --description="TFX on Google Cloud Vertex Pipelines" \ --display-name="TFX Vertex Pipelines service account" \ --project=$PROJECT_ID # Add Vertex Pipelines gcloud projects add-iam-policy-binding $PROJECT_ID \ --member="serviceAccount:${SERVICE_ACCOUNT_ID}@${PROJECT_<EMAIL>" \ --role="roles/aiplatform.user" # Add BigQuery gcloud projects add-iam-policy-binding $PROJECT_ID \ --member="serviceAccount:${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.<EMAIL>" \ --role="roles/bigquery.user" # + # Add GCS gcloud projects add-iam-policy-binding $PROJECT_ID \ --member=serviceAccount:${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.<EMAIL> \ --role=roles/storage.objectAdmin # gsutil iam ch \ # serviceAccount:${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com:roles/storage.objectCreator \ # $BUCKET_NAME # gsutil iam ch \ # serviceAccount:${SERVICE_ACCOUNT_ID}@${PROJECT_<EMAIL>:roles/storage.objectViewer \ # $BUCKET_NAME # https://cloud.google.com/vertex-ai/docs/pipelines/configure-project gcloud iam service-accounts 
add-iam-policy-binding \ $SERVICE_ACCOUNT_ID@$PROJECT_ID.iam.gserviceaccount.com \ --member="user:<EMAIL>" \ --role="roles/iam.serviceAccountUser" # - # ### Create a storage bucket to store pipeline artifacts # + GCS_BUCKET = f"gs://{PROJECT_ID}-bucket" ARTIFACT_STORE_URI = os.path.join(GCS_LOCATION, "tfx_artifacts") # !echo {BUCKET_NAME} # - # !gsutil ls -al $BUCKET_NAME # ### Import libraries # + import tensorflow as tf from tfx import v1 as tfx import kfp print('TensorFlow version: {}'.format(tf.__version__)) print('TFX version: {}'.format(tfx.__version__)) print('KFP version: {}'.format(kfp.__version__)) # - # ## Review the TFX pipeline design pattern for Google Cloud # %cd pipeline # !ls -la
self-paced-labs/vertex-pipelines/tfx/lab_exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <font size="6"><center><b>TECNOLÓGICO DE MONTERREY</b></center></font>
#
# <br>
#
# <font size="5"><center><b>Técnicas computacionales de aprendizaje automático</b></center></font>
#
# <font size="5"><center><b>(Machine Learning Techniques)</b></center></font>
#
# <br>
#
# <font size="5"><center><em> Profr. Dr. <NAME></em></center></font>
#
# <br>
#
# <font size="5"><center><b>Homework 04. Data Visualization From Patterns</b></center></font>
#
# <br>
#
# <font size="4"><center><em>Authors:</em></center></font>
#
# <br>
#
# <font size="5"><center><em> <NAME> (A01749803)</em></center></font>
#
# <font size="5"><center><em> <NAME> (A01748867)</em></center></font>
#
# <font size="5"><center><em> <NAME> (A01184125)</em></center></font>
#
# <br>
#
# <font size="4"><center> November 11, 2019</center></font>

# <font size="5"><center><b>1. Introduction</b></center></font>
# <br>
#
# <body><font size="3">Visualizing data is a complete field of study of prominent importance. Using numbers alone, it is difficult to see how data behaves, how a model classifies, or how much of the data a pattern covers; to make these relations easier for humans to grasp, several visualization techniques have been proposed throughout the years. For this assignment we explored different ways to visualize data, from pre-established methods to some less common ones. Using several datasets, each with different characteristics, we obtained many patterns and then analyzed their composition to find the best way to visualize them. Each of the following sections presents the data visualization designs we propose, together with examples for different numbers of attributes.</font></body>

# <font size="5"><b>2.
Visualization for numerical items in binary classification</b></font> # # <br> # # <body><font size="3">In this section we will describe our visualization proposal for univariate contrast patterns in a binary classification problem. We designed a different graphical visualization for one, two, three, and four numerical items. Below we describe each proposal and its application in the patterns found in the previous assignments:</font></body> # <font size="5"><b>2.1 One numerical item, binary classification (boxplots)</b></font> # # <br> # # <font size="4"><b>2.1.1 Design description</b></font> # # <br> # # <body><font size="3">For one numerical item, we propose <b>boxplots</b> and <b>violin plots</b>. A box and whisker plot โ€”also called a box plotโ€” displays the five-number summary of a set of data. The five-number summary is the minimum, first quartile, median, third quartile, and maximum. In a box plot, we draw a box from the first quartile to the third quartile. A vertical line goes through the box at the median. The whiskers go from each quartile to the minimum or maximum. A boxplot is a standardized way of displaying the distribution of data, it can show the outliers and their values, it can also show if the data is symmetrical, how tightly it is grouped, and if and how the data is skewed. Figure 1 shows the parts of a boxplot, which are covered with our visualization proposal. The boxplots shown in this section were created with <em>matplotlib</em>, for violin plots we used <em>altair</em> library. In both cases, we are using a different fill color for each class.</font></body> # # <figure> # <img src="boxplot.png"> # <figcaption><center>Figure 1: Different parts of a boxplot.</center></figcaption> # </figure> # # <body><font size="3">Boxplots are convenient for comparing summary statistics (such as range and quartiles) but does not let you see variations in the data. 
For multimodal distributions (those with multiple peaks) this can be particularly limiting. A violin plot is a hybrid of a box plot and a kernel density plot, which shows peaks in the data. Figure 2 shows how the peaks of a violin plot represents the probability. </font></body> # # <body><font size ="3">How to decide between boxplots and violin plots? We suggest boxplots when most of the values are clustered around the median. For a different distribution (e.g. if values are clustered around the minimum and the maximum with nothing in the middle) we suggest violin plots. There exists a visualization that overlays a boxplot to a violin plot, but we considered it not so friendly. The boxplots and violin graphs showed in section 2.1.2 are independent, we show them together in this document in order to compare them.</font></body> # # <figure> # <img src="violinplot2.png" height="500" width="400"> # <figcaption><center>Figure 2: Violin plot.</center></figcaption> # </figure> # <font size="4"><b> 2.1.2 Visualization of Universities Patterns</b></font> # # <br> # # <body><font size ="3"><b>Pattern:</b> outputs_in_top_25percent_citation_avg > 3216.40} [0.62, 0.07]</font> # </body> # # <br> # <br> # # <body><font size ="3">This pattern indicates that the best universities have, on average, more than 3216 outputs in the 25% of the most-cited papers in Scopus. The pattern has 0.62 support for class_0 and 0.07 support for class_1. In the following violin plot we can easily see that the widest part of class_0 is over the highlighted boundary of the pattern (3216.40). Some instances of class_0 are also found in higher values of the vertical axis. 
On the other hand, class_1 is more crowded below the boundary (3216.40).</font></body> # ### 2.1.2 a) One numerical item, binary classification (box plots) # + import pandas as pd import seaborn as sns df = pd.read_csv("Data/filteredData_WithClass.csv") # Draw a vertical boxplot grouped # by a categorical variable: sns.set_style("whitegrid") sns.boxplot(x = 'class', y = 'outputs_in_top_25percent_citation_avg', data = df) # Other attributes (one item patterns) to create the graph: # outputs_in_top_25percent_citation_avg # citations_per_publication_articles_conference_papers_growth # outputs_in_top_1percent_citation_avg # - # ### 2.1.2 b) One numerical item, binary classification (violin plots) # + import altair as alt from altair import datum from vega_datasets import data #lt.renderers.enable('notebook') data = pd.read_csv('data/filteredData_WithClass.csv') alt.Chart(data).transform_filter( datum.outputs_in_top_25percent_citation_avg > 0 ).transform_bin( ['bin_max', 'bin_min'], field='outputs_in_top_25percent_citation_avg', bin=alt.Bin(maxbins=20) ).transform_calculate( binned=(datum.bin_max + datum.bin_min) / 2 ).transform_aggregate( value_count='count()', groupby=['class', 'binned'] ).transform_impute( impute='value_count', groupby=['class'], key='binned', value=0 ).mark_area( interpolate='monotone', orient='horizontal' ).encode( x=alt.X( 'value_count:Q', title=None, stack='center', axis=alt.Axis(labels=False, values=[0],grid=False, ticks=True), ), y=alt.Y('binned:Q', bin='binned', title='outputs_in_top_25percent_citation_avg'), color=alt.Color('class:N', legend=None), column=alt.Column( 'class:N', header=alt.Header( titleOrient='bottom', labelOrient='bottom', labelPadding=0, ), ), ).properties( width=80 ).configure_facet( spacing=0 ).configure_view( stroke=None ) # Other attributes (one item patterns) to create the graph: # outputs_in_top_25percent_citation_avg # citations_per_publication_articles_conference_papers_growth # outputs_in_top_1percent_citation_avg # 
- # <font size="5"><b>2.2 Two numerical items, binary classification</b></font> # # <br> # # <font size="4"><b>2.2.1 Design description</b></font> # # <br> # # <body><font size="3">For two numerical items, we designed a colored scatter plot because it allow us to observe relationships between variables. Identification of correlational relationships are common with scatter plots. In these cases, we want to know, if we were given a particular horizontal value, what a good prediction would be for the vertical value. Relationships between variables can be described in many ways: positive or negative, strong or weak, linear or nonlinear. Figure 3 summarizes this relationships.</font></body> # # <br> # <br> # # <body><font size="3">The design of the proposed visualization consists of using each axis for the values of a different attribute, and using colors to distinguish classes. This graph allow us to easily see the most crowded points of each attribute as well as identify the furthest points or outliers. We can also see the correlation between both attributes. The proposed visualization is interactive, that allow us to zoom in and out as needed. We used <em>altair</em> library to construct this graphs. </font></body> # # <figure> # <img src="scatter-plot.png" height="500" width="400"> # <figcaption><center>Figure 3: A brief description of relationships between variables in scatter plots.</center></figcaption> # </figure> # ### 2.2.2 Visualization of Universities Pattern # <body><font size ="3"><b>Pattern:</b> outputs_in_top_10percent_citation_2016 > 1774.50 AND outputs_in_top_10percent_citation_articles_reviews_conference_papers_growth > -31.38 [0.48, 0.00]</font> # </body> # # <br> # <br> # # <body><font size ="3">This pattern has 0.48 support for the class_0 and 0.0 support for class_1. The first attribute means that the best universities had more than 1774.50 outputs in the 10% of the most-cited documents in Scopus, during 2016. 
The second attribute means that they also have a decrease (or negative growth) bigger than 31.38 in the outputs in the 10% of the most-cited articles, reviews and conference papers in Scopus. In the following graphs we can appreciate that, due to the distribution of the points, we can conclude that there is a moderate negative relationship between this two attributes. This graph allow us to move the grid and zoom in the graph, in order to explore in more detail the areas of interest.</font></body> # + import altair as alt from vega_datasets import data data = pd.read_csv('Data/filteredData_WithClass.csv') alt.Chart(data).mark_circle().encode( x='outputs_in_top_10percent_citation_2016', y='outputs_in_top_10percent_citation_articles_reviews_conference_papers_growth', color='class', ).interactive() # - # <font size="5"><b>2.3 Three numerical items, binary classification</b></font> # # <br> # # <font size="4"><b>2.3.1 Design description</b></font> # # <br> # # <body><font size="3">For patterns with three numerical items, we used multifeatured colored scatter plots. So, the description of 2.2.1 applies to this visualization. We used <em>altair</em> library. In this visualization, we are still using each axis for the values of a different attribute and the size of each circle is according to the value of the third attribute. It is important to select which attribute will be assigned to those three positions because the resulting graph could be very different, as exemplified in the following graphs.</font></body> # ### 2.3.2 Visualization of Universities Pattern # # <body><font size ="3"><b>Pattern:</b> scholarly_output_2017 $>$ 7855.50 AND citation_count_growth_acc $\le$ -3759.50 AND publications_in_top_5percent_journal_growth_acc $\le$ 405.00 [0.48, 0.00]</font> # </body> # # <br> # <br> # # <body><font size ="3">This pattern has 0.48 support for the class\_0 and 0.0 support for class\_1. 
The pattern indicates that the best universities had more than 7855 publications in 2017, an accumulated decrease (or negative growth) in citation count smaller to 3759 and a growth smaller or equal to 405 in outputs in the 5\% of the best journals in Scopus.</font></body> # # <body><font size ="3">This pattern can be visualized in the following graphs. It is remarkable the difference obtained just by changing the position of the attributes. The attributes related to the accumulated growth in citations and publications were assigned to the graph axis can be seen in the first graph, while the size of the circles indicates the value of the scholarly output. This way, we can clearly see the blue and orange points that distinguish each class. The size of the circles is also visible. With respect to the relationships between variables, the points are scattered with a slightly marked agglomeration in the upper central part and bigger circles are in the outside of such agglomeration; but we could conclude that there is not relationship between the attributes. On the other hand, for the second graph we used axis for scholarly output and the citation count growth, while the size of the circles refers to the growth in publications. 
This last visualization shows a strong negative relation between the attributes in the axis.</font></body> # # <body><font size ="3"> # + import altair as alt from vega_datasets import data data = pd.read_csv('Data/filteredData_WithClass.csv') alt.Chart(data).mark_circle().encode( alt.X('publications_in_top_5percent_journal_growth_acc', scale=alt.Scale(zero=False)), alt.Y('citation_count_growth_acc', scale=alt.Scale(zero=False, padding=1)), color='class', size='scholarly_output_2017' ) alt.Chart(data).mark_circle().encode( alt.X('citation_count_growth_acc', scale=alt.Scale(zero=False)), alt.Y('publications_in_top_5percent_journal_growth_acc', scale=alt.Scale(zero=False, padding=1)), color='class', size='publications_in_top_5percent_journal_growth_acc' ) # - import altair as alt data = pd.read_csv('Data/filteredData_WithClass.csv') alt.Chart(data).mark_circle().encode( alt.X('publications_in_top_1percent_journal_2017:Q', scale=alt.Scale(type='log')), alt.Y('citations_per_publication_articles_conference_papers_avg:Q', scale=alt.Scale(zero=False)), color='class:N', size='outputs_in_top_25percent_citation_articles_only_2017:Q' ) # <font size="5"><b>2.4 Four numerical items, binary classification</b></font> # # <br> # # <font size="4"><b>2.4.1 Design description</b></font> # # <br> # # <body><font size="3">For patterns with four numerical items, we used 3D scatter plots. So, the description of 2.2.1 applies to this visualization. We used the <em>plotly</em> library. In this visualization, we are still using each axis for the values of a different attribute and the color indicates the value of the fourth attribute. Changing the attribute that will be assigned to the color scale (not to an axis) will result in a different graph. </font></body> # # <body><font size="3">We tested a variant in which the color indicates the class and the size of each circle is according to the value of the fourth attribute (as exemplified in Figure 4). 
The code of such graph is delivered in the Jupyter Notebook, nevertheless for this particular dataset we preferred the first visualization proposal that does not indicate the class (Figures</font></body> # # <figure> # <img src="4ItemScatterplot.png" height="700" width="700"> # <figcaption><center>Figure 4: Multifeature Scatter Plot for 4 item patterns a). The color indicates the attribute "out-puts_in_top_10percent_citation_articles_only_2017", the darkest blue corresponds to the lower valuesand the lightest corresponds to the highest values.</center></figcaption> # </figure> # # ### 2.4.2 4D scatterplot (no class distinction) # + import pandas as pd import plotly import plotly.graph_objs as go #Read data from csv data = pd.read_csv('Data/filteredData_WithClass.csv') #Set marker properties markercolor = data['outputs_in_top_10percent_citation_articles_only_2017'] #Make Plotly figure fig1 = go.Scatter3d(x=data['outputs_in_top_25percent_citation_avg'], y=data['citations_per_publication_articles_conference_papers_growth'], z=data['publications_in_top_1percent_journal_2017'], marker=dict(color=markercolor, opacity=1, reversescale=True, colorscale='Blues', size=5), line=dict (width=0.02), mode='markers') #Make Plot.ly Layout mylayout = go.Layout(scene=dict(xaxis=dict( title="outputs_top25_citation_avg"), yaxis=dict( title="citations_p/public_growth"), zaxis=dict(title="public_top1_journals_2017")),) #Plot and save html plotly.offline.plot({"data": [fig1], "layout": mylayout}, auto_open=True, filename=("4DPlot.html")) # - # ### 2.4.3 4D scatterplot (with class distinction) # + import pandas as pd import plotly import plotly.graph_objs as go #Read cars data from csv data = pd.read_csv("Data/filteredData_WithClass.csv") #Set marker properties markersize = data['outputs_in_top_10percent_citation_articles_only_2017']/100 markercolor = data['class1'] #Make Plotly figure fig1 = go.Scatter3d(x=data['outputs_in_top_25percent_citation_avg'], 
y=data['citations_per_publication_articles_conference_papers_growth'], z=data['publications_in_top_1percent_journal_2017'], marker=dict(size=markersize, color=markercolor, opacity=0.9, reversescale=True, colorscale='Greys'), line=dict (width=0.02), mode='markers') #Make Plot.ly Layout mylayout = go.Layout(scene=dict(xaxis=dict( title="outputs_top25_citation_avg"), yaxis=dict( title="citations_per_publication_growth"), zaxis=dict(title="publications_top1_journal_2017")),) #Plot and save html plotly.offline.plot({"data": [fig1], "layout": mylayout}, auto_open=True, filename=("5D Plot.html")) # - # <font size="5"><b>3. Visualization for categorical items</b></font> # # <br> # # <body><font size="3">In this section we will describe our visualization proposal for univariate contrast patterns in a binary classification problem with only categorical data. We proposed the usage of Alluvial diagrams for the visualization of patterns with one, two, three and four attributes:</font></body> # + import imp import pandas as pd import plotly.graph_objects as go import create_table as ct from pathlib import Path def create_sankey(df: pd.DataFrame, pattern: str, class_name, num_classes=0): patterns = ct.convert_pattern(pattern) r = ct.analyze_initial(df, patterns[:-1], patterns[-1], class_name) labels = ct.get_all_keys(r) sources, dests, flows, ways = ct.get_sources_and_dest(r, labels) node_colors = ct.get_colors_nodes(labels, num_classes) links_colors = ct.get_colors_links(sources, dests, labels, ways) fig = go.Figure(data=[go.Sankey( node = dict( pad = 15, thickness = 20, line = dict(color = "black", width = 0.5), label = labels, color = node_colors ), link = dict( source = sources, target = dests, value = flows, color = links_colors ))]) fig.update_layout(title_text=pattern, font_size=10) fig.show() # - p = Path("Data/mushrooms.csv") d = pd.read_csv(p) pattern = "odor != 'none' AND bruises = 'no'[0.83 0.00]" create_sankey(d, pattern, "type" ) pattern = "gill_size = 'broad' AND 
odor != 'foul' AND ring_number = 'one'[0.00 0.81]" create_sankey(d, pattern, "type") pattern = "bruises = 'no' AND gill_spacing = 'close' AND stalk_color_above_ring != 'orange' AND odor != 'none'[0.81 0.00]" create_sankey(d, pattern, "type") # <font size="5"><b>3.1 One numerical item, multiclass classification</b></font> # # <br> # # <font size="4"><b>3.1.1 Design description</b></font> # # <br> # # <body><font size="3">Alluvial diagrams are a form of Sankey diagrams, used nowadays as a tool for categorical data visualisation. It differs from the standard Sankey diagrams due to the fact that they only flow forward representing changes in group composition between attributes. The data in this diagrams is grouped into flows that can easily be traced, this proves very useful for data visualisation.</font></body> # # <br> # # <body><font size="3">For this particular work, each vertical block of the diagram represents attributes of the instances that compose a data-set, the height of such blocks represents the amount of instances that contain that attribute. The connections between the blocks going from left to right, represent how instances split or unify according to the attributes they contain, where the width of the connections show the proportion of instances that move to the next block. E.g. in figure \ref{fig:attr1} we can see the diagram of a pattern obtained from a data-set that separates \textit{poisonous} from \textit{non poisonous} mushrooms, the pattern is compose by one attribute ('odor') and it covers about 55\% of the instances labelled as poisonous of the data. The green flux shows those instances that are covered by the pattern while the grey flux represents those instances that are not covered by the pattern.</font></body> # <font size="5"><b>4. 
Visualization for numerical items in multiclass classification</b></font> # # <br> # # <body><font size="3">For multiclass classification we used the dataset "cars", which is published in the UCI Machine Learning Repository. The proposed visualizations are basically the same than those for numerical items in binary classification (violin plots and scatter plots), therefore we will not repeated the design description. </font></body> # <font size="5"><b>4.1 One numerical item, multiclass classification</b></font> # # <br> # # <font size="4"><b>4.1.1 Design description</b></font> # # <br> # # <body><font size="3">We created violin plots using \textit{seaborn}, just to test a different library. Following the same design as the one for singleclass classification this time we present one next to the other the plots obtained for each of the classes of the data. This violin plots clearly show the median. The vertical axis indicates the value of the numerical attribute and the horizontal axis shows the different classes.</font></body> # # <figure> # <img src="1Item_Multiclass.png"height="500" width="500"> # <figcaption><center>Figure 5: Visualization for 1 numerical item in multiclass classification. The vertical axis indicates the length value and the horizontal axis shows the different body styles as classes.</center></figcaption> # </figure> # ### 4.1.2 One numerical item, multiclass import seaborn as sns sns.set(style="whitegrid") cars = pd.read_csv('Data/cars.csv') #ax = sns.violinplot(x=cars["price"]) ax = sns.violinplot(x="body-style", y="length", data=cars) # <font size="5"><b>4.2 Two numerical item, multiclass classification</b></font> # # <br> # # <font size="4"><b>4.2.1 Design description</b></font> # # <br> # # <body><font size="3">The visualization for two numerical items is a colored scatter plot, using each axis for the value of each attribute. In the case of multi class, each class becomes clearly distinguishable through colors. 
The graph below shows two attributes and the values each of the classes has. In this particular case a negative relationship between the attributes becomes extremely evident.</font></body> # ### 4.2.2 Scatter plot for 2 attributes multiclass # + import altair as alt from vega_datasets import data data = pd.read_csv('Data/cars.csv') alt.Chart(data).mark_circle().encode( x='city-mpg', y='engine-size', color='num-of-cylinders', ).interactive() # - # <font size="5"><b>4.3 Three numerical items, multiclass classification</b></font> # # <br> # # <font size="4"><b>4.3.1 Design description</b></font> # # <br> # # <body><font size="3">The visualization for three numerical items is a colored scatter plot, using each axis for the value of each attribute plus a third attribute represented as the diamater of each the circle, changing the overall size of the marker. The graph below shows two attributes on the axis, and a third one as the size of the circle. In this example the data is labeled under differnt categorical classes each clearly distinguishable through different colors.</font></body> # + import altair as alt from vega_datasets import data data = pd.read_csv('Data/cars.csv') alt.Chart(data).mark_circle().encode( alt.X('length', scale=alt.Scale(zero=False)), alt.Y('width', scale=alt.Scale(zero=False, padding=1)), color='body-style', size='city-mpg' ) # - # <font size="5"><b>4.4 Four numerical item, multiclass</b></font> # # <br> # # <font size="4"><b>4.4.1 Design description</b></font> # # <br> # # <body><font size="3">For this particular case we created scatter plot using \textit{plotly}, however the previous library can be used too. Three attributes are assigned to each axis and the fourth attribute is indicated using a color scale. The class is represented thwough different symbols, one for each class, which in the below examples are: class_1 = circle, class_2 = diamond, class_3 = square and class_4 = X. 
Classes can be added and deleted interactively in the following graphs.</font></body> # + import plotly.express as px data = pd.read_csv("Data/cars.csv") fig = px.scatter_3d(data, x='curb-weight', y='horsepower', z='price', color='Num-of-cylinders2', size='Num-of-cylinders2', size_max=12, symbol='body-style', opacity=0.7) # tight layout fig.update_layout(margin=dict(l=0, r=0, b=0, t=0)) # + import pandas as pd import plotly import plotly.graph_objs as go #Read cars data from csv data = pd.read_csv("Data/cars.csv") #Set marker properties markersize = data['engine-size']/15 markercolor = data['city-mpg'] markershape = data['body-style'].replace("sedan","square").replace("wagon","circle").replace("hatchback","cross").replace("hardtop","diamond").replace("convertible","x") #Make Plotly figure fig1 = go.Scatter3d(x=data['curb-weight'], y=data['horsepower'], z=data['price'], marker=dict(size=markersize, color=markercolor, symbol=markershape, opacity=0.9, reversescale=True, colorscale='Blues'), line=dict (width=0.02), mode='markers') #Make Plot.ly Layout mylayout = go.Layout(scene=dict(xaxis=dict( title="engine-size"), yaxis=dict( title="horsepower"), zaxis=dict(title="price")),) #Plot and save html plotly.offline.plot({"data": [fig1], "layout": mylayout}, auto_open=True, filename=("6DPlot.html")) # - # <font size="5"><b>5. Visualization for categorical items with many classes</b></font> # # <br> # # <body><font size="3">In this section we will describe our visualization proposal for univariate contrast patterns in a multiclass classification problem with only categorical data. 
We proposed the usage of Alluvial diagrams for the visualization of patterns with one, two, three and four attributes:</font></body> p = Path("Data/car.csv") d = pd.read_csv(p) d = d.rename(columns={"class": "class0"}) pattern = "safety = 'low'[0.48 0.00 0.00 0.00]" create_sankey(d, pattern, "class0", 4) pattern = "paint != 'vhigh' AND persons = '2'[0.36 0.00 0.00 0.00]" create_sankey(d, pattern, "class0", 4) pattern = "buying != 'vhigh' AND safety != 'low' AND persons = '2'[0.24 0.00 0.00 0.00]" create_sankey(d, pattern, "class0", 4) pattern = "persons != '2' AND buying != 'vhigh' AND paint != 'vhigh' AND safety = 'low'[0.18 0.00 0.00 0.00]" create_sankey(d, pattern, "class0", 4) # <font size="5"><b>6. Visualization for categorical and numerical items</b></font> # # <br> # # <body><font size="3">In this section we will describe our visualization proposal for univariate contrast patterns with both numerical and cathegorical data. We proposed the usage of Alluvial diagrams for the visualization of patterns with one, two, three and four attributes:</font></body> p = Path("Data/adult.csv") d = pd.read_csv(p) d = d.rename(columns={"class": "class0"}) d = d.rename(columns=lambda a: a.replace("-", " ")) pattern = "sex = ' Female'[0.39 0.15]" create_sankey(d, pattern, "class0") pattern = "hours-per-week <= 41.50 AND relationship != ' Husband'[0.57 0.14]" create_sankey(d, pattern, "class0") pattern = "relationship != ' Husband' AND relationship != ' Wife' AND education-num <= 12.50[0.55 0.06]" create_sankey(d, pattern, "class0") pattern = "hours-per-week <= 41.50 AND relationship != ' Husband' AND marital-status != ' Married-civ-spouse' AND capital-gain <= 7139.50[0.54 0.05]" create_sankey(d, pattern, "class0") # <font size="5"><b>8. Visualization for numerical items in multiclass classification</b></font> # # <br> # # <body><font size="3">In this particular assignment we had to come up with different way to visualise diferent dimensionalities of patterns within data. 
We noticed that there is a considerably large array of pre-established techniques that can be used for this task. We explored the usage of Sankey diagrams — more specifically, Alluvial diagrams — as a way to demonstrate how the elements are grouped through the items of a pattern, not only for unary and binary labeled data but also for data with multiple classes.
Activity4/Assigment4.ipynb
#export
@contextmanager
def working_directory(path):
    "Change working directory to `path` and return to previous on exit."
    original = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore the previous directory, even if the body raised.
        os.chdir(original)

#export
def add_docs(cls, cls_doc=None, **docs):
    "Copy values from `docs` to `cls` docstrings, and confirm all public methods are documented"
    if cls_doc is not None:
        cls.__doc__ = cls_doc
    for name, doc in docs.items():
        member = getattr(cls, name)
        # Class/static methods keep their docstring on the wrapped function.
        if hasattr(member, '__func__'):
            member = member.__func__
        member.__doc__ = doc
    # Any public callable still lacking a docstring is a documentation bug.
    nodoc = [c for n, c in vars(cls).items()
             if callable(c) and not n.startswith('_') and c.__doc__ is None]
    assert not nodoc, f"Missing docs: {nodoc}"
    assert cls.__doc__ is not None, f"Missing class docs: {cls}"
# # Suppose you have the following undocumented class: class T: def foo(self): pass def bar(self): pass # You can add documentation to this class like so: add_docs(T, cls_doc="A docstring for the class.", foo="The foo method.", bar="The bar method.") # Now, docstrings will appear as expected: test_eq(T.__doc__, "A docstring for the class.") test_eq(T.foo.__doc__, "The foo method.") test_eq(T.bar.__doc__, "The bar method.") # `add_docs` also validates that all of your public methods contain a docstring. If one of your methods is not documented, it will raise an error: # + class T: def foo(self): pass def bar(self): pass f=lambda: add_docs(T, "A docstring for the class.", foo="The foo method.") test_fail(f, contains="Missing docs") # + #hide class _T: def f(self): pass @classmethod def g(cls): pass add_docs(_T, "a", f="f", g="g") test_eq(_T.__doc__, "a") test_eq(_T.f.__doc__, "f") test_eq(_T.g.__doc__, "g") # - #export def docs(cls): "Decorator version of `add_docs`, using `_docs` dict" add_docs(cls, **cls._docs) return cls # Instead of using `add_docs`, you can use the decorator `docs` as shown below. Note that the docstring for the class can be set with the argument `cls_doc`: # + @docs class _T: def f(self): pass def g(cls): pass _docs = dict(cls_doc="The class docstring", f="The docstring for method f.", g="A different docstring for method g.") test_eq(_T.__doc__, "The class docstring") test_eq(_T.f.__doc__, "The docstring for method f.") test_eq(_T.g.__doc__, "A different docstring for method g.") # - # For either the `docs` decorator or the `add_docs` function, you can still define your docstrings in the normal way. 
Below we set the docstring for the class as usual, but define the method docstrings through the `_docs` attribute: # + @docs class _T: "The class docstring" def f(self): pass _docs = dict(f="The docstring for method f.") test_eq(_T.__doc__, "The class docstring") test_eq(_T.f.__doc__, "The docstring for method f.") # - show_doc(is_iter) assert is_iter([1]) assert not is_iter(array(1)) assert is_iter(array([1,2])) assert (o for o in range(3)) # export def coll_repr(c, max_n=10): "String repr of up to `max_n` items of (possibly lazy) collection `c`" return f'(#{len(c)}) [' + ','.join(itertools.islice(map(repr,c), max_n)) + ( '...' if len(c)>max_n else '') + ']' # `coll_repr` is used to provide a more informative [`__repr__`](https://stackoverflow.com/questions/1984162/purpose-of-pythons-repr) about list-like objects. `coll_repr` and is used by `L` to build a `__repr__` that displays the length of a list in addition to a preview of a list. # # Below is an example of the `__repr__` string created for a list of 1000 elements: test_eq(coll_repr(range(1000)), '(#1000) [0,1,2,3,4,5,6,7,8,9...]') test_eq(coll_repr(range(1000), 5), '(#1000) [0,1,2,3,4...]') test_eq(coll_repr(range(10), 5), '(#10) [0,1,2,3,4...]') test_eq(coll_repr(range(5), 5), '(#5) [0,1,2,3,4]') # We can set the option `max_n` to optionally preview a specified number of items instead of the default: test_eq(coll_repr(range(1000), max_n=5), '(#1000) [0,1,2,3,4...]') # export def is_bool(x): "Check whether `x` is a bool or None" return isinstance(x,(bool,NoneType)) or risinstance('bool_', x) # export def mask2idxs(mask): "Convert bool mask or index list to index `L`" if isinstance(mask,slice): return mask mask = list(mask) if len(mask)==0: return [] it = mask[0] if hasattr(it,'item'): it = it.item() if is_bool(it): return [i for i,m in enumerate(mask) if m] return [int(i) for i in mask] test_eq(mask2idxs([False,True,False,True]), [1,3]) test_eq(mask2idxs(array([False,True,False,True])), [1,3]) 
#export
def cycle(o):
    "Like `itertools.cycle` except creates list of `None`s if `o` is empty"
    items = listify(o)
    if items is not None and len(items) > 0:
        return itertools.cycle(items)
    # Empty/None input still yields an infinite stream, of `None`s.
    return itertools.cycle([None])

#export
def zip_cycle(x, *args):
    "Like `itertools.zip_longest` but `cycle`s through elements of all but first argument"
    # The first argument bounds the length; the rest repeat as needed.
    cycled = (cycle(a) for a in args)
    return zip(x, *cycled)

#export
def is_indexer(idx):
    "Test whether `idx` will index a single item in a list"
    if isinstance(idx, int):
        return True
    # Arrays/tensors select a single element only when 0-dimensional;
    # objects without `ndim` default to 1, i.e. not a single-item indexer.
    return not getattr(idx, 'ndim', 1)
assert not is_indexer([1, 2]) assert not is_indexer(np.array([[1, 2], [3, 4]])) # ## `L` helpers #export class CollBase: "Base class for composing a list of `items`" def __init__(self, items): self.items = items def __len__(self): return len(self.items) def __getitem__(self, k): return self.items[list(k) if isinstance(k,CollBase) else k] def __setitem__(self, k, v): self.items[list(k) if isinstance(k,CollBase) else k] = v def __delitem__(self, i): del(self.items[i]) def __repr__(self): return self.items.__repr__() def __iter__(self): return self.items.__iter__() # `ColBase` is a base class that emulates the functionality of a python `list`: # + class _T(CollBase): pass l = _T([1,2,3,4,5]) test_eq(len(l), 5) # __len__ test_eq(l[-1], 5); test_eq(l[0], 1) #__getitem__ l[2] = 100; test_eq(l[2], 100) # __set_item__ del l[0]; test_eq(len(l), 4) # __delitem__ test_eq(str(l), '[2, 100, 4, 5]') # __repr__ # - # ## L - #export class _L_Meta(type): def __call__(cls, x=None, *args, **kwargs): if not args and not kwargs and x is not None and isinstance(x,cls): return x return super().__call__(x, *args, **kwargs) #export class L(GetAttr, CollBase, metaclass=_L_Meta): "Behaves like a list of `items` but can also index with list of indices or masks" _default='items' def __init__(self, items=None, *rest, use_list=False, match=None): if (use_list is not None) or not is_array(items): items = listify(items, *rest, use_list=use_list, match=match) super().__init__(items) @property def _xtra(self): return None def _new(self, items, *args, **kwargs): return type(self)(items, *args, use_list=None, **kwargs) def __getitem__(self, idx): return self._get(idx) if is_indexer(idx) else L(self._get(idx), use_list=None) def copy(self): return self._new(self.items.copy()) def _get(self, i): if is_indexer(i) or isinstance(i,slice): return getattr(self.items,'iloc',self.items)[i] i = mask2idxs(i) return (self.items.iloc[list(i)] if hasattr(self.items,'iloc') else self.items.__array__()[(i,)] if 
hasattr(self.items,'__array__') else [self.items[i_] for i_ in i]) def __setitem__(self, idx, o): "Set `idx` (can be list of indices, or mask, or int) items to `o` (which is broadcast if not iterable)" if isinstance(idx, int): self.items[idx] = o else: idx = idx if isinstance(idx,L) else listify(idx) if not is_iter(o): o = [o]*len(idx) for i,o_ in zip(idx,o): self.items[i] = o_ def __eq__(self,b): if b is None: return False if risinstance('ndarray', b): return array_equal(b, self) if isinstance(b, (str,dict)): return False return all_equal(b,self) def sorted(self, key=None, reverse=False): return self._new(sorted_ex(self, key=key, reverse=reverse)) def __iter__(self): return iter(self.items.itertuples() if hasattr(self.items,'iloc') else self.items) def __contains__(self,b): return b in self.items def __reversed__(self): return self._new(reversed(self.items)) def __invert__(self): return self._new(not i for i in self) def __repr__(self): return repr(self.items) def _repr_pretty_(self, p, cycle): p.text('...' 
if cycle else repr(self.items) if is_array(self.items) else coll_repr(self)) def __mul__ (a,b): return a._new(a.items*b) def __add__ (a,b): return a._new(a.items+listify(b)) def __radd__(a,b): return a._new(b)+a def __addi__(a,b): a.items += list(b) return a @classmethod def split(cls, s, sep=None, maxsplit=-1): return cls(s.split(sep,maxsplit)) @classmethod def range(cls, a, b=None, step=None): return cls(range_of(a, b=b, step=step)) def map(self, f, *args, gen=False, **kwargs): return self._new(map_ex(self, f, *args, gen=gen, **kwargs)) def argwhere(self, f, negate=False, **kwargs): return self._new(argwhere(self, f, negate, **kwargs)) def argfirst(self, f, negate=False): return first(i for i,o in self.enumerate() if f(o)) def filter(self, f=noop, negate=False, gen=False, **kwargs): return self._new(filter_ex(self, f=f, negate=negate, gen=gen, **kwargs)) def enumerate(self): return L(enumerate(self)) def renumerate(self): return L(renumerate(self)) def unique(self, sort=False, bidir=False, start=None): return L(uniqueify(self, sort=sort, bidir=bidir, start=start)) def val2idx(self): return val2idx(self) def cycle(self): return cycle(self) def map_dict(self, f=noop, *args, gen=False, **kwargs): return {k:f(k, *args,**kwargs) for k in self} def map_first(self, f=noop, g=noop, *args, **kwargs): return first(self.map(f, *args, gen=True, **kwargs), g) def itemgot(self, *idxs): x = self for idx in idxs: x = x.map(itemgetter(idx)) return x def attrgot(self, k, default=None): return self.map(lambda o: o.get(k,default) if isinstance(o, dict) else nested_attr(o,k,default)) def starmap(self, f, *args, **kwargs): return self._new(itertools.starmap(partial(f,*args,**kwargs), self)) def zip(self, cycled=False): return self._new((zip_cycle if cycled else zip)(*self)) def zipwith(self, *rest, cycled=False): return self._new([self, *rest]).zip(cycled=cycled) def map_zip(self, f, *args, cycled=False, **kwargs): return self.zip(cycled=cycled).starmap(f, *args, **kwargs) def 
map_zipwith(self, f, *rest, cycled=False, **kwargs): return self.zipwith(*rest, cycled=cycled).starmap(f, **kwargs) def shuffle(self): it = copy(self.items) random.shuffle(it) return self._new(it) def concat(self): return self._new(itertools.chain.from_iterable(self.map(L))) def reduce(self, f, initial=None): return reduce(f, self) if initial is None else reduce(f, self, initial) def sum(self): return self.reduce(operator.add, 0) def product(self): return self.reduce(operator.mul, 1) def setattrs(self, attr, val): [setattr(o,attr,val) for o in self] #export add_docs(L, __getitem__="Retrieve `idx` (can be list of indices, or mask, or int) items", range="Class Method: Same as `range`, but returns `L`. Can pass collection for `a`, to use `len(a)`", split="Class Method: Same as `str.split`, but returns an `L`", copy="Same as `list.copy`, but returns an `L`", sorted="New `L` sorted by `key`. If key is str use `attrgetter`; if int use `itemgetter`", unique="Unique items, in stable order", val2idx="Dict from value to index", filter="Create new `L` filtered by predicate `f`, passing `args` and `kwargs` to `f`", argwhere="Like `filter`, but return indices for matching items", argfirst="Return index of first matching item", map="Create new `L` with `f` applied to all `items`, passing `args` and `kwargs` to `f`", map_first="First element of `map_filter`", map_dict="Like `map`, but creates a dict from `items` to function results", starmap="Like `map`, but use `itertools.starmap`", itemgot="Create new `L` with item `idx` of all `items`", attrgot="Create new `L` with attr `k` (or value `k` for dicts) of all `items`.", cycle="Same as `itertools.cycle`", enumerate="Same as `enumerate`", renumerate="Same as `renumerate`", zip="Create new `L` with `zip(*items)`", zipwith="Create new `L` with `self` zip with each of `*rest`", map_zip="Combine `zip` and `starmap`", map_zipwith="Combine `zipwith` and `starmap`", concat="Concatenate all elements of list", shuffle="Same as 
`random.shuffle`, but not inplace", reduce="Wrapper for `functools.reduce`", sum="Sum of the items", product="Product of the items", setattrs="Call `setattr` on all items" ) #export #hide # Here we are fixing the signature of L. What happens is that the __call__ method on the MetaClass of L shadows the __init__ # giving the wrong signature (https://stackoverflow.com/questions/49740290/call-from-metaclass-shadows-signature-of-init). def _f(items=None, *rest, use_list=False, match=None): ... L.__signature__ = inspect.signature(_f) #export Sequence.register(L); # `L` is a drop in replacement for a python `list`. Inspired by [NumPy](http://www.numpy.org/), `L`, supports advanced indexing and has additional methods (outlined below) that provide additional functionality and encourage simple expressive code. For example, the code below takes a list of pairs, selects the second item of each pair, takes its absolute value, filters items greater than 4, and adds them up: # + from fastcore.utils import gt d = dict(a=1,b=-5,d=6,e=9).items() test_eq(L(d).itemgot(1).map(abs).filter(gt(4)).sum(), 20) # abs(-5) + abs(6) + abs(9) = 20; 1 was filtered out. # - # Read [this overview section](https://fastcore.fast.ai/#L) for a quick tutorial of `L`, as well as background on the name. # # You can create an `L` from an existing iterable (e.g. a list, range, etc) and access or modify it with an int list/tuple index, mask, int, or slice. All `list` methods can also be used with `L`. 
t = L(range(12)) test_eq(t, list(range(12))) test_ne(t, list(range(11))) t.reverse() test_eq(t[0], 11) t[3] = "h" test_eq(t[3], "h") t[3,5] = ("j","k") test_eq(t[3,5], ["j","k"]) test_eq(t, L(t)) test_eq(L(L(1,2),[3,4]), ([1,2],[3,4])) t # Any `L` is a `Sequence` so you can use it with methods like `random.sample`: assert isinstance(t, Sequence) import random random.sample(t, 3) # + #hide # test set items with L of collections x = L([[1,2,3], [4,5], [6,7]]) x[0] = [1,2] test_eq(x, L([[1,2], [4,5], [6,7]])) # non-idiomatic None-ness check - avoid infinite recursion some_var = L(['a', 'b']) assert some_var != None, "L != None" # - # There are optimized indexers for arrays, tensors, and DataFrames. # + arr = np.arange(9).reshape(3,3) t = L(arr, use_list=None) test_eq(t[1,2], arr[[1,2]]) import pandas as pd df = pd.DataFrame({'a':[1,2,3]}) t = L(df, use_list=None) test_eq(t[1,2], L(pd.DataFrame({'a':[2,3]}, index=[1,2]), use_list=None)) # - # You can also modify an `L` with `append`, `+`, and `*`. t = L() test_eq(t, []) t.append(1) test_eq(t, [1]) t += [3,2] test_eq(t, [1,3,2]) t = t + [4] test_eq(t, [1,3,2,4]) t = 5 + t test_eq(t, [5,1,3,2,4]) test_eq(L(1,2,3), [1,2,3]) test_eq(L(1,2,3), L(1,2,3)) t = L(1)*5 t = t.map(operator.neg) test_eq(t,[-1]*5) test_eq(~L([True,False,False]), L([False,True,True])) t = L(range(4)) test_eq(zip(t, L(1).cycle()), zip(range(4),(1,1,1,1))) t = L.range(100) test_shuffled(t,t.shuffle()) test_eq(L([]).sum(), 0) test_eq(L([]).product(), 1) def _f(x,a=0): return x+a t = L(1)*5 test_eq(t.map(_f), t) test_eq(t.map(_f,1), [2]*5) test_eq(t.map(_f,a=2), [3]*5) # An `L` can be constructed from anything iterable, although tensors and arrays will not be iterated over on construction, unless you pass `use_list` to the constructor. 
test_eq(L([1,2,3]),[1,2,3]) test_eq(L(L([1,2,3])),[1,2,3]) test_ne(L([1,2,3]),[1,2,]) test_eq(L('abc'),['abc']) test_eq(L(range(0,3)),[0,1,2]) test_eq(L(o for o in range(0,3)),[0,1,2]) test_eq(L(array(0)),[array(0)]) test_eq(L([array(0),array(1)]),[array(0),array(1)]) test_eq(L(array([0.,1.1]))[0],array([0.,1.1])) test_eq(L(array([0.,1.1]), use_list=True), [array(0.),array(1.1)]) # `use_list=True` to unwrap arrays/arrays # If `match` is not `None` then the created list is same len as `match`, either by: # # - If `len(items)==1` then `items` is replicated, # - Otherwise an error is raised if `match` and `items` are not already the same size. test_eq(L(1,match=[1,2,3]),[1,1,1]) test_eq(L([1,2],match=[2,3]),[1,2]) test_fail(lambda: L([1,2],match=[1,2,3])) # If you create an `L` from an existing `L` then you'll get back the original object (since `L` uses the `NewChkMeta` metaclass). test_is(L(t), t) # An `L` is considred equal to a list if they have the same elements. It's never considered equal to a `str` a `set` or a `dict` even if they have the same elements/keys. 
test_eq(L(['a', 'b']), ['a', 'b']) test_ne(L(['a', 'b']), 'ab') test_ne(L(['a', 'b']), {'a':1, 'b':2}) # ### `L` Methods show_doc(L.__getitem__) t = L(range(12)) test_eq(t[1,2], [1,2]) # implicit tuple test_eq(t[[1,2]], [1,2]) # list test_eq(t[:3], [0,1,2]) # slice test_eq(t[[False]*11 + [True]], [11]) # mask test_eq(t[array(3)], 3) show_doc(L.__setitem__) t[4,6] = 0 test_eq(t[4,6], [0,0]) t[4,6] = [1,2] test_eq(t[4,6], [1,2]) show_doc(L.unique) test_eq(L(4,1,2,3,4,4).unique(), [4,1,2,3]) show_doc(L.val2idx) test_eq(L(1,2,3).val2idx(), {3:2,1:0,2:1}) show_doc(L.filter) list(t) test_eq(t.filter(lambda o:o<5), [0,1,2,3,1,2]) test_eq(t.filter(lambda o:o<5, negate=True), [5,7,8,9,10,11]) show_doc(L.argwhere) test_eq(t.argwhere(lambda o:o<5), [0,1,2,3,4,6]) show_doc(L.argfirst) test_eq(t.argfirst(lambda o:o>4), 5) show_doc(L.map) test_eq(L.range(4).map(operator.neg), [0,-1,-2,-3]) # If `f` is a string then it is treated as a format string to create the mapping: test_eq(L.range(4).map('#{}#'), ['#0#','#1#','#2#','#3#']) # If `f` is a dictionary (or anything supporting `__getitem__`) then it is indexed to create the mapping: test_eq(L.range(4).map(list('abcd')), list('abcd')) # You can also pass the same `arg` params that `bind` accepts: def f(a=None,b=None): return b test_eq(L.range(4).map(f, b=arg0), range(4)) show_doc(L.map_dict) test_eq(L(range(1,5)).map_dict(), {1:1, 2:2, 3:3, 4:4}) test_eq(L(range(1,5)).map_dict(operator.neg), {1:-1, 2:-2, 3:-3, 4:-4}) show_doc(L.zip) t = L([[1,2,3],'abc']) test_eq(t.zip(), [(1, 'a'),(2, 'b'),(3, 'c')]) t = L([[1,2,3,4],['a','b','c']]) test_eq(t.zip(cycled=True ), [(1, 'a'),(2, 'b'),(3, 'c'),(4, 'a')]) test_eq(t.zip(cycled=False), [(1, 'a'),(2, 'b'),(3, 'c')]) show_doc(L.map_zip) t = L([1,2,3],[2,3,4]) test_eq(t.map_zip(operator.mul), [2,6,12]) show_doc(L.zipwith) b = [[0],[1],[2,2]] t = L([1,2,3]).zipwith(b) test_eq(t, [(1,[0]), (2,[1]), (3,[2,2])]) show_doc(L.map_zipwith) test_eq(L(1,2,3).map_zipwith(operator.mul, [2,3,4]), 
#export
def save_config_file(file, d, **kwargs):
    "Write settings dict `d` to a new config file, or overwrite the existing one."
    config = ConfigParser(**kwargs)
    config['DEFAULT'] = d
    # Use a context manager so the handle is flushed and closed even on error
    # (the original passed a bare `open(file, 'w')`, leaking the file object).
    with open(file, 'w') as f:
        config.write(f)

#export
def read_config_file(file, **kwargs):
    "Read `file` with `ConfigParser` and return its 'DEFAULT' section."
    config = ConfigParser(**kwargs)
    config.read(file)
    return config['DEFAULT']
#export
class Config:
    "Reading and writing `ConfigParser` ini files"
    def __init__(self, cfg_path, cfg_name, create=None):
        "Read config from `cfg_path/cfg_name`, seeding it from dict `create` if the file is missing."
        cfg_path = Path(cfg_path).expanduser().absolute()
        self.config_path,self.config_file = cfg_path,cfg_path/cfg_name
        if not self.config_file.exists():
            if create:
                # Seed the file from `create` (making parent dirs as needed),
                # then fall through to re-read it like any existing config.
                self.d = create
                cfg_path.mkdir(exist_ok=True, parents=True)
                self.save()
            else: raise FileNotFoundError(f"Could not find {cfg_name}")
        self.d = read_config_file(self.config_file)
    def __setitem__(self,k,v): self.d[k] = str(v)  # ConfigParser stores string values only
    def __contains__(self,k): return k in self.d
    def save(self): save_config_file(self.config_file,self.d)
    # Attribute access proxies to the config dict. The `k=='d'` guard prevents
    # infinite recursion if `__getattr__` fires before `self.d` exists.
    # NOTE(review): `stop` presumably raises the given exception in expression
    # position (fastcore helper) — confirm against fastcore.basics.
    def __getattr__(self,k): return stop(AttributeError(k)) if k=='d' or k not in self.d else self.get(k)
    def __getitem__(self,k): return stop(IndexError(k)) if k not in self.d else self.get(k)
    def get(self,k,default=None): return self.d.get(k, default)  # like dict.get
    def path(self,k,default=None):
        "Value of key `k`, interpreted as a path relative to the config directory."
        v = self.get(k, default)
        return v if v is None else self.config_path/v
nbs/02_foundation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Mini-inception experiment on a label-corrupted 0-vs-1 MNIST task:
# keep `true` correctly labelled 0/1 digits, give `sin` non-0/1 digits random
# 0/1 labels, train an 8-layer inception-style CNN on the mix, then compare
# train accuracy against accuracy on the genuine 0/1 test digits.

# + id="lR-eQfx0acq8"
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm as tqdm
# %matplotlib inline
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random

# + id="HxCE5TSmap0T"
# from google.colab import drive
# drive.mount('/content/drive')

# + id="yh4EScpqacrC"
# Center-crop to 28x28 (a no-op for MNIST) and normalise the single channel
# from [0, 1] to [-1, 1].
transform = transforms.Compose(
    [transforms.CenterCrop((28,28)),transforms.ToTensor(),transforms.Normalize([0.5], [0.5])])

# + id="YoAqFwqpacrH"
mnist_trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
mnist_testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# + id="56x0FjYZacrM"
# Indices of every genuine 0 and 1 in the training set.
index1 = [np.where(mnist_trainset.targets==0)[0] , np.where(mnist_trainset.targets==1)[0] ]
index1 = np.concatenate(index1,axis=0)
len(index1) #12665

# + id="QZWNdX-BvxKP"
true = 100        # number of correctly labelled 0/1 examples kept
total = 47000     # total size of the corrupted training set
sin = total-true  # number of non-0/1 digits that receive random 0/1 labels
sin

# + id="D45juO_vINDB"
epochs = 300

# + id="yQOoEO2meJDv"
# Sample the clean examples (note: with replacement, so duplicates possible).
indices = np.random.choice(index1,true)
indices.shape

# + id="d_8snSm1acrR"
# Indices of every digit that is neither 0 nor 1.
index = np.where(np.logical_and(mnist_trainset.targets!=0,mnist_trainset.targets!=1))[0] #47335
index.shape

# + id="ejwWlWXm5pP7"
req_index = np.random.choice(index.shape[0], sin, replace=False)
# req_index

# + id="mEg6CPL-6fuj"
index = index[req_index]
index.shape

# + id="vYhiSaAnacrW"
# Random 0/1 labels for the corrupted examples.
values = np.random.choice([0,1],size= sin)
print(sum(values ==0),sum(values==1), sum(values ==0) + sum(values==1) )

# + id="uLuqrhO1hRR6"
# Rebuild the training set in place: clean examples first, corrupted after.
mnist_trainset.data = torch.cat((mnist_trainset.data[indices],mnist_trainset.data[index]))
mnist_trainset.targets = torch.cat((mnist_trainset.targets[indices],torch.Tensor(values).type(torch.LongTensor)))

# + id="43UoqeUI4hIV"
mnist_trainset.targets.shape, mnist_trainset.data.shape

# + id="iczET1tohOsL"
# mnist_trainset.targets[index] = torch.Tensor(values).type(torch.LongTensor)
j =20078 # Without Shuffle upto True Training numbers correct , after that corrupted
print(plt.imshow(mnist_trainset.data[j]),mnist_trainset.targets[j])

# + id="U8WPVHnPacrd"
trainloader = torch.utils.data.DataLoader(mnist_trainset, batch_size=250,shuffle=True, num_workers=2)

# + id="7L7ocAcracri"
testloader = torch.utils.data.DataLoader(mnist_testset, batch_size=250,shuffle=False, num_workers=2)

# + id="ULbHmoX-5QRM"
mnist_trainset.data.shape

# + id="pdhAr2p6acrm"
classes = ('zero', 'one')

# + id="jhUOj6Tnacrr"
dataiter = iter(trainloader)
# NOTE(review): .next() was removed from DataLoader iterators in newer
# PyTorch; next(dataiter) is the portable spelling.
images, labels = dataiter.next()

# + id="vVgW5hBkacrv"
images[:4].shape

# + id="nUtA80KCacr1"
def imshow(img):
    """Un-normalise a CHW image tensor back to [0, 1] and display it."""
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

# + id="Xv34FfAdacr5"
imshow(torchvision.utils.make_grid(images[:10]))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(10)))

# + id="QHs5PlMOT-Hv"
class Conv_module(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    NOTE: the constructor argument order is (inp_ch, f, s, k, pad) --
    stride `s` comes BEFORE kernel size `k`.
    """
    def __init__(self,inp_ch,f,s,k,pad):
        super(Conv_module,self).__init__()
        self.inp_ch = inp_ch
        self.f = f        # output channels
        self.s = s        # stride
        self.k = k        # kernel size
        self.pad = pad
        self.conv = nn.Conv2d(self.inp_ch,self.f,k,stride=s,padding=self.pad)
        self.bn = nn.BatchNorm2d(self.f)
        self.act = nn.ReLU()

    def forward(self,x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.act(x)
        return x

# + id="Imcx6keET-sA"
class inception_module(nn.Module):
    """Parallel 1x1 (f0 channels) and 3x3 (f1 channels) conv branches,
    concatenated along the channel dimension -> f0 + f1 output channels."""
    def __init__(self,inp_ch,f0,f1):
        super(inception_module, self).__init__()
        self.inp_ch = inp_ch
        self.f0 = f0
        self.f1 = f1
        self.conv1 = Conv_module(self.inp_ch,self.f0,1,1,pad=0)
        self.conv3 = Conv_module(self.inp_ch,self.f1,1,3,pad=1)
        #self.conv1 = nn.Conv2d(3,self.f0,1)
        #self.conv3 = nn.Conv2d(3,self.f1,3,padding=1)

    def forward(self,x):
        x1 = self.conv1.forward(x)
        x3 = self.conv3.forward(x)
        #print(x1.shape,x3.shape)
        x = torch.cat((x1,x3),dim=1)
        return x

# + id="0bMmJLXzUAWe"
class downsample_module(nn.Module):
    """Stride-2 conv branch (f channels) alongside a stride-2 max-pool of the
    input, concatenated on channels.  Returns (concatenated, conv_branch)."""
    def __init__(self,inp_ch,f):
        super(downsample_module,self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.conv = Conv_module(self.inp_ch,self.f,2,3,pad=0)
        self.pool = nn.MaxPool2d(3,stride=2,padding=0)

    def forward(self,x):
        x1 = self.conv(x)
        #print(x1.shape)
        x2 = self.pool(x)
        #print(x2.shape)
        x = torch.cat((x1,x2),dim=1)
        return x,x1

# + id="sRcNGxt7UDO2"
class inception_net(nn.Module):
    """8-inception-layer mini network for 1-channel 28x28 input, 2 classes."""
    def __init__(self):
        super(inception_net,self).__init__()
        self.conv1 = Conv_module(1,96,1,3,0)
        self.incept1 = inception_module(96,32,32)
        self.incept2 = inception_module(64,32,48)
        self.downsample1 = downsample_module(80,80)
        self.incept3 = inception_module(160,112,48)
        self.incept4 = inception_module(160,96,64)
        self.incept5 = inception_module(160,80,80)
        self.incept6 = inception_module(160,48,96)
        self.downsample2 = downsample_module(144,96)
        self.incept7 = inception_module(240,176,60)
        self.incept8 = inception_module(236,176,60)
        self.pool = nn.AvgPool2d(5)
        self.linear = nn.Linear(236,2)

    def forward(self,x):
        x = self.conv1.forward(x)
        #act1 = x
        x = self.incept1.forward(x)
        #act2 = x
        x = self.incept2.forward(x)
        #act3 = x
        x,act4 = self.downsample1.forward(x)
        x = self.incept3.forward(x)
        #act5 = x
        x = self.incept4.forward(x)
        #act6 = x
        x = self.incept5.forward(x)
        #act7 = x
        x = self.incept6.forward(x)
        #act8 = x
        x,act9 = self.downsample2.forward(x)
        x = self.incept7.forward(x)
        #act10 = x
        x = self.incept8.forward(x)
        #act11 = x
        #print(x.shape)
        x = self.pool(x)
        #print(x.shape)
        x = x.view(-1,1*1*236)
        x = self.linear(x)
        return x

# + id="rByxqKdOacsM"
inc = inception_net()
inc = inc.to("cuda")

# + id="imWjagA2acsO"
criterion_inception = nn.CrossEntropyLoss()
optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9)

# + id="r5mCNr38acsR"
# Train until the mean per-epoch loss drops to 0.03 (or `epochs` epochs).
acti = []
loss_curi = []
for epoch in range(epochs):  # loop over the dataset multiple times
    ep_lossi = []
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        # print(inputs.shape)
        # zero the parameter gradients
        optimizer_inception.zero_grad()
        # forward + backward + optimize
        outputs = inc(inputs)
        loss = criterion_inception(outputs, labels)
        loss.backward()
        optimizer_inception.step()
        # print statistics
        running_loss += loss.item()
        if i % 50 == 49:  # print every 50 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 50))
            ep_lossi.append(running_loss/50)  # loss per minibatch
            running_loss = 0.0
    loss_curi.append(np.mean(ep_lossi))  #loss per epoch
    if (np.mean(ep_lossi)<=0.03):
        break
# acti.append(actis)
print('Finished Training')

# + id="IxXbhlbEacsW"
# Accuracy on the (corrupted) training set.
correct = 0
total = 0
with torch.no_grad():
    for data in trainloader:
        images, labels = data
        images, labels = images.to("cuda"), labels.to("cuda")
        outputs = inc(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 60000 train images: %d %%' % (
    100 * correct / total))

# + id="U2kQXLzgacsa"
total,correct

# + id="5IcmEl2lacsd"
# Accuracy on the full test set, keeping labels/predictions for later slicing.
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to("cuda"),labels.to("cuda")
        out.append(labels.cpu().numpy())
        outputs= inc(images)
        _, predicted = torch.max(outputs.data, 1)
        pred.append(predicted.cpu().numpy())
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

# + id="KrlBRcSQacsg"
out = np.concatenate(out,axis=0)

# + id="EWyBhOPlacsi"
pred = np.concatenate(pred,axis=0)

# + id="IJ1F7Lfdacsl"
# Restrict evaluation to the genuine 0/1 test digits.
index = np.logical_or(out ==1,out==0)
print(index.shape)

# + id="h7PmLPkGacsn"
acc = sum(out[index] == pred[index])/sum(index)
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100*acc))

# + id="fvCi8x41acsq"
sum(index)

# + id="bdGNgi62acss"
import random
random.sample([1,2,3,4,5,6,7,8],5)

# + id="KBZqLh7heULT"
# torch.save(inc.state_dict(),"/content/drive/My Drive/model_simple_8000.pkl")

# + id="JgLuOIeLeUHk"
fig = plt.figure()
plt.plot(loss_curi,label="loss_Curve")
plt.xlabel("epochs")
plt.ylabel("training_loss")
plt.legend()
fig.savefig("loss_curve.pdf")

# + id="pZhSFZLeeXeO"

# + id="NuAy-cU0sY9z"
CODS_COMAD/SIN/MNIST/mini_inception_8layer_100.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ##### Copyright \u0026copy; 2020 The TensorFlow Authors. # + #@title Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # - # # Create a TFX pipeline using templates # ## Introduction # # This document will provide instructions to create a TensorFlow Extended (TFX) pipeline # using *templates* which are provided with TFX Python package. # Most of instructions are Linux shell commands, and corresponding # Jupyter Notebook code cells which invoke those commands using `!` are provided. # # You will build a pipeline using [Taxi Trips dataset]( # https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) # released by the City of Chicago. We strongly encourage you to try to build # your OWN pipeline using your OWN dataset by utilizing this pipeline as a baseline. # # ## Prerequisites # # * Linux # * Python >= 3.5.3 # * [Docker Engine](https://docs.docker.com/install/) # # You can get all prerequisites easily by [launching this notebook on Google Cloud Platform AI Platform Notebook](https://console.cloud.google.com/mlengine/notebooks/deploy-notebook?q=download_url%3Dhttps%253A%252F%252Fraw.githubusercontent.com%252Ftensorflow%252Ftfx%252Fmaster%252Fdocs%252Ftutorials%252Ftfx%252Ftemplate.ipynb) # # ## Step 1. Set up your environment. 
# # You should prepare a development environment to build a pipeline, and a Kubeflow Pipeline cluster to run the newly built pipeline. # # ### 1a. Development environment # # #### On your local machine # # Install `tfx` and `kfp` python packages. `kfp` is required to use Kubeflow Pipeline(KFP) as an orchestrator engine. # # You also need to download `skaffold`. `skaffold` is a tool to build docker images easily. A custom docker image will be used when running a pipeline on KFP. # # There are a couple of Notebook files in the template, and a Jupyter Notebook kernel with this virtualenv is required to run them. # # You can use following shell script snippet to set up your environment. # # ```sh # # Create a virtualenv for tfx. # virtualenv -p python3 venv # source venv/bin/activate # # Install python packages. # pip install tfx kfp # # Download skaffold. # curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 # chmod +x skaffold # # mv skaffold venv/bin/ # # Install a Jupyter Notebook kernel for this virtualenv. # python -m ipykernel install --user --name=tfx # ``` # #### On Cloud AI Platform Notebook # If you are using Cloud AI Platform Notebook, create a TensorFlow pre-installed instance for the notebook. # # Install `tfx`, `kfp`, and `skaffold`, and add installation path to the `PATH` environment variable. # # <div class="alert alert-warning"> # NOTE: There might be some errors during package installation. For example, "ERROR: some-package 0.some_version.1 has requirement other-package!=2.0.,&lt;3,&gt;=1.15, but you'll have other-package 2.0.0 which is incompatible." Please ignore these errors at this moment. # </div> # # TODO(b/149346490): TFX team is preparing a base image which includes tfx, kfp and skaffold by default. You won't have to install packages in this section in the near future. # Install tfx and kfp Python packages. 
# !pip3 install --user --upgrade -q tfx # !pip3 install --user --upgrade -q kfp # Download skaffold and set it executable. # !curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 && chmod +x skaffold && mv skaffold /home/jupyter/.local/bin/ # Set `PATH` to include user python binary directory and a directory containing `skaffold`. # PATH=%env PATH # %env PATH={PATH}:/home/jupyter/.local/bin # Let's check the version of TFX. # ```bash # python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))" # ``` # !python3 -c "import tfx; print('TFX version: {}'.format(tfx.__version__))" # ### 1b. Kubeflow Pipeline cluster # # TFX pipeline can be run on Kubernetes using [Kubeflow Pipelines](https://www.kubeflow.org/docs/pipelines/overview/pipelines-overview/). If you don't have one, you can [create a Kubeflow Pipeline cluster on GCP](https://cloud.google.com/ai-platform/pipelines/docs/setting-up). # This tutorial assumes that the cluster runs on GCP. # # You should be logged in to cloud services to use cloud APIs. If you are using Google Cloud AI Platform Notebook, you are automatically logged in to GCP. Otherwise, you should be logged in using [gcloud utility](https://cloud.google.com/sdk/gcloud/reference/auth/login). # # Let's set some environment variables to use Kubeflow Pipeline. # # First, make sure what your GCP project ID is. If you are using terminal environment, You can find you project ID and set it to an environment variable with following command. # ```bash # export GCP_PROJECT_ID=$(gcloud config list --format 'value(core.project)' 2>/dev/null) # ``` # Read GCP project id from env. # shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null GCP_PROJECT_ID=shell_output[0] print("GCP project ID:" + GCP_PROJECT_ID) # We also need to access your KFP cluster. You can access it in your Google Cloud Console under "AI Platform > Pipeline" menu. 
The "endpoint" of the KFP cluster can be found from the URL of the Pipelines dashboard. Let's set the endpoint to `ENDPOINT` envrionment variable. ENDPOINT should contain only the host part of the URL. For example, if the URL of the KFP dashboard is `https://1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com/#/start`, ENDPOINT value becomes `1e9deb537390ca22-dot-asia-east1.pipelines.googleusercontent.com`. # # ```bash # export ENDPOINT=XXXXXXX.pipelines.googleusercontent.com # ``` # <div class="alert alert-danger"> # <b>Note:</b> You MUST set your ENDPOINT value below. # </div> # This refers to the KFP cluster endpoint ENDPOINT='' # Enter your ENDPOINT here. if not ENDPOINT: from absl import logging logging.error('Set your ENDPOINT in this cell.') # As mentioned above, we will use a custom docker image to run pipeline on KFP. This docker image should be hosted on a docker registry, and we recommend Google Container Registry(gcr.io). Please set `CUSTOM_TFX_IMAGE` environment variable to an appropriate image name. For example, following command sets the image name as `tfx-pipeline` under the current GCP project. # # ```bash # export CUSTOM_TFX_IMAGE=gcr.io/${GCP_PROJECT_ID}/tfx-pipeline # ``` # Docker image name for the pipeline image CUSTOM_TFX_IMAGE='gcr.io/' + GCP_PROJECT_ID + '/tfx-pipeline' # And, it's done. We are ready to create a pipeline. # ## Step 2. Copy predefined template to your project directory. # # In this step, we will create a working pipeline project by copying from a predefined template. # # Please decide a name for the new pipeline and a project directory to put your files in. # Let's Define environment variables for these. # ```bash # export PIPELINE_NAME="my_pipeline" # export PROJECT_DIR=~/tfx/${PIPELINE_NAME} # ``` PIPELINE_NAME="my_pipeline" import os PROJECT_DIR=os.path.join(os.path.expanduser("~"),"AIHub",PIPELINE_NAME) # TFX provides provides `taxi` template with tfx python package. 
If you are planning to solve a point-wise prediction problem including classification and regression, this template could be used as a starting point. # # Use `tfx` cli to copy predefined template to your project directory. # # ```sh # tfx template copy \ # --pipeline_name="${PIPELINE_NAME}" \ # --destination_path="${PROJECT_DIR}" \ # --model=taxi # ``` # !tfx template copy \ # --pipeline_name={PIPELINE_NAME} \ # --destination_path={PROJECT_DIR} \ # --model=taxi # Change working directory to the project directory which contains generated files. # ```bash # # cd ${PROJECT_DIR} # ``` # %cd {PROJECT_DIR} # If you are using Cloud AI Platform Notebook, don't forget to change directory in `File Browser` on the left side of the screen, too. # ## Step 3. Browse your copied source files. # TFX template provides basic scaffold files to build a pipeline, including python source codes, sample data and Jupyter Notebook files to analyze the output of the pipeline. `taxi` template uses the same *Chicago Taxi* dataset and ML model with [Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/airflow_workshop). # # Here is a brief introduction to each python file. # - configs.py: defines common constants for pipeline runners. # - pipeline.py: defines TFX components and a pipeline. # - beam_dag_runner.py / kubeflow_dag_runner.py: define runners for each orchestration engine. # - features.py / features_test.py: defines features for the model. # - hparams.py: defines hyperparameters of the model. # - preprocessing.py / preprocessing_test.py: defines preprocessing jobs using tf::Transform. # - model.py / model_test.py: defines DNN model using TF estimator. # # !ls # You might notice that there are some files with `_test.py` in their name. They are unit tests of the pipeline and it is recommended to add more unit tests as you implement your model. # # You can try to run unit tests simply by supplying test files to `python` binary. 
# ```bash # python features_test.py # ``` # !python3 features_test.py # ## Step 4. Run your first TFX pipeline # # Copied pipeline can be run using `tfx` cli. In this step, we will create pipelines using two orchestrator engines, Beam and Kubeflow. # # ### 4a. Using Beam orchestrator # [Apache Beam](https://beam.apache.org/) can be used as an orchestrating engine for the pipeline without additional configuration. # # You can create a pipeline using `pipeline create` command. # ```bash # tfx pipeline create --engine=beam --pipeline_path=beam_dag_runner.py # ``` # Then, you can run the created pipeline using `run create` command. # ```sh # tfx run create --engine=beam --pipeline_name="${PIPELINE_NAME}" # ``` # If successful, you'll see `Component CsvExampleGen is finished.` When you copy the template, only one component, CsvExampleGen, is included in the pipeline. Beam orchestrator is useful for local experiments, but a production pipeline usually requires more scalable and stable running environments like, for example, Kubernetes. # ### 4b. Using Kubeflow orchestrator # Components in the TFX pipeline will generate outputs for each run, and they need to be stored somewhere. You can use any storage which the KFP cluster can access, and we will use Google Cloud Storage(GCS) in this document. If you created a KFP cluster in GCP, a default GCS bucket should have been created automatically. It has a name starting with `hostedkfp-default-`. # # To run this pipeline in KFP, you should edit `configs.py` to set your GCS bucket name. You can see your GCS buckets using `gsutil` command. # You can see your buckets using `gsutil`. Following command will show bucket names without prefix and postfix. # !gsutil ls | cut -d / -f 3 # ```bash # gsutil ls # ``` # # Set `GCS_BUCKET_NAME` in `configs.py` without `gs://` or `/`. For example, if `gsutil ls` displayed `gs://my-bucket`, you should set `my-bucket`. 
# ``` # GCS_BUCKET_NAME = 'my-bucket' # ``` # # <div class="alert alert-danger"> # <b>Note:</b> You MUST set your GCS bucket name in the `configs.py` file before proceed. # </div> # Let's create a pipeline on KFP. # ```bash # tfx pipeline create \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint=${ENDPOINT} \ # --build_target_image=${CUSTOM_TFX_IMAGE} # ``` # <div class="alert alert-warning"> # Note: When creating a pipeline for KFP, we need a container image which will be used to run our pipeline. And `skaffold` will build the image for us. Because skaffold pulls base images from the docker hub, it will take 5~10 minutes when we build the image for the first time, but it will take much less time from the second build. # </div> # !tfx pipeline create \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} \ # --build_target_image={CUSTOM_TFX_IMAGE} # While creating a pipeline, `Dockerfile` and `build.yaml` will be generated to build a docker image. Don't forget to add these files to the source control system(for example, git) along with other source files. # # A pipeline definition file for [argo](https://argoproj.github.io/argo/) will be generated, too. The name of this file is `${PIPELINE_NAME}.tar.gz`. For example, it will be `my_pipeline.tar.gz` if the name of your pipeline is `my_pipeline`. It is recommended NOT to include this pipeline definition file into source control. Because it will be generated from other python files and will be updated whenever you update the pipeline. For your convenience, this file is already listed in `.gitignore` which is generated automatically. # # NOTE: `kubeflow` will be automatically selected as an orchestration engine if `airflow` is not installed and `--engine` is not specified. # Then, you can run the created pipeline using `run create` command. 
# ```sh # tfx run create --pipeline_name="${PIPELINE_NAME}" --endpoint=${ENDPOINT} # ``` # !tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT} # Or, you can run the pipeline on the KFP Dashboard, too. # # You can see the run using `run list` or `run status` command. # # ```sh # tfx run list --pipeline_name="${PIPELINE_NAME}" --endpoint=${ENDPOINT} # ``` # However, we recommend visiting your KFP Dashboard using Web Browser. If you launched your KFP cluster in GCP, you can access KFP Dashboard from the Cloud AI Platform Pipelines menu in Google Cloud Console. Once you visit the dashboard, you will be able to find the pipeline, the run and many more information about the pipeline. # For example, you can find your runs under *Experiments* menu, and you can find all your artifacts from the pipeline under *Artifacts* menu. # # <div class="alert alert-warning"> # Note: If your pipeline run fails, you can see detailed logs in the KFP Dashboard. # # One of the major sources of failure is permission related problems. Please make sure your KFP cluster has permissions to access Google Cloud APIs. This can be configured [when you create a KFP cluster in GCP](https://cloud.google.com/ai-platform/pipelines/docs/setting-up), or see [Troubleshooting document in GCP](https://cloud.google.com/ai-platform/pipelines/docs/troubleshooting). # </div> # ## Step 5. Add components for data validation. # # In this step, you will add components for data validation including StatisticsGen, SchemaGen, and ExampleValidator. If you are interested in data validation, please see [Get started with Tensorflow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started). # # Open `pipeline.py` with an editor. Find and uncomment 3 lines which add StatisticsGen, SchemaGen, and ExampleValidator to the pipeline. (Tip: search `TODO(step 5):`) # # You need to update existing pipeline with modified pipeline definition. Use `pipeline update` command with `tfx` cli. 
# # If you are using beam orchestrator, # ```sh # # Update the pipeline # tfx pipeline update --engine=beam --pipeline_path=beam_dag_runner.py # # You can run the pipeline the same way. # tfx run create --engine beam --pipeline_name "${PIPELINE_NAME}" # ``` # # If you are using Kubeflow orchestrator, # ```sh # # Update the pipeline # tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint=${ENDPOINT} # # # You can run the pipeline the same way. # tfx run create --pipeline_name "${PIPELINE_NAME}" # ``` # Update the pipeline # !tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # You can run the pipeline the same way. # !tfx run create --pipeline_name {PIPELINE_NAME} --endpoint={ENDPOINT} # ### Check pipeline outputs # If you are using Beam orchestrator, open `data_validation.ipynb` with Jupyter Notebook. # # For Kubeflow Orchestrator, visit KFP dashboard and you can find pipeline outputs in the page for your pipeline run. Click "Experiments" tab on the left, and "All runs" in the Experiments page. You should be able to find the run with the name of your pipeline. # ## Step 6. Add components for training. # # In this step, you will add components for training and model validation including Transform, Trainer, ModelValidator and Pusher. These components are implementing basic ML model using simple DNN. You can find more details about the model in [Tutorial](https://www.tensorflow.org/tfx/tutorials/tfx/airflow_workshop). # # Open `pipeline.py` with an editor. Find and uncomment 4 lines which add Transform, Trainer, ModelValidator and Pusher to the pipeline. (Tip: search `TODO(step 6):`) # # You need to update existing pipeline with modified pipeline definition, again. Updating instruction is the same as Step 5. Please update the pipeline using `pipeline update` and create a run using `run create`. 
# !tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline_name {PIPELINE_NAME} --endpoint={ENDPOINT} # If you are not using Cloud AI Platform Notebook, check the newly trained model with `model_analysis.ipynb` notebook. TFMA Jupyter extension is required to see the visualization. See instructions in the notebook file. # # NOTE: This notebook file doesn't work on Cloud AI Platform Notebook or other JupyterLab environments. # ## Step 7. (*Optional*) Try BigQueryExampleGen. # [BigQuery] is a serverless, highly scalable, and cost-effective cloud data warehouse. BigQuery can be used as a source for training examples in TFX. In this step, we will add `BigQueryExampleGen` to the pipeline. # # Open `pipeline.py` with an editor. Comment out `CsvExampleGen` and uncomment the line which create an instance of `BigQueryExampleGen`. You also need to uncomment `import` statement and `query` argument of the `create_pipeline` function. # # We need to specify which GCP project to use for BigQuery, and this is done by setting `--project` in `beam_pipeline_args` when creating a pipeline. open `configs.py` and uncomment the definition of `GCP_PROJECT_ID`, `GCP_REGION`, `BIG_QUERY_BEAM_PIPELINE_ARGS` and `BIG_QUERY_QUERY`. You should replace the project id and the region value in this file. # <div class="alert alert-danger"> # <b>Note:</b> You MUST set your GCP project ID and region in the `configs.py` file before proceed. # </div> # # Lastly, open `kubeflow_dag_runner.py` (or `beam_dag_runner.py` if you'll use Beam orchestrator) and uncomment two arguments, `query` and `beam_pipeline_args`, for create_pipeline() method. # # Now the pipeline is ready to use BigQuery as an example source. Update the pipeline and create a run as we did in step 5 and 6. # !tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline_name {PIPELINE_NAME} --endpoint={ENDPOINT} # ## Step 8. 
(*Optional*) Try Dataflow with KFP. # # Several [TFX Components uses Apache Beam](https://www.tensorflow.org/tfx/guide/beam) to implement data-parallel pipelines, and it means that you can distribute data processing workloads using [Google Cloud Dataflow](https://cloud.google.com/dataflow/). In this step, we will set the Kubeflow orchestrator to use dataflow as a data processing back-end of a Apache Beam. # # Open `configs.py` with an editor, and uncomment the definition of `GCP_PROJECT_ID`, `GCP_REGION`, and `BEAM_PIPELINE_ARGS`. Open `kubeflow_dag_runner.py` and uncomment `beam_pipeline_args`. (Comment out current `beam_pipeline_args` what you added in Step 7.) # # Now the pipeline is ready to use Dataflow. Update the pipeline and create a run as we did in step 5 and 6. # !tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline_name {PIPELINE_NAME} --endpoint={ENDPOINT} # You can find your Dataflow jobs in [Dataflow in Cloud Console](http://console.cloud.google.com/dataflow). # ## Step 9. (*Optional*) Try Cloud AI Platform Training and Prediction with KFP. # TFX interoperates with serveral managed GCP services, such as [Cloud AI Platform for Training and Prediction](https://cloud.google.com/ai-platform/). You can set your Trainer component to use Cloud AI Platform Training, a managed service for ML training workload. Moreover, when your model is built and ready to be served, you can *push* your model to Cloud AI Platform Prediction for serving. In this step, we will set our `Trainer` and `Pusher` component to use Cloud AI Platform services. # # Before editing files, you might have to enable [AI Platform Training & Prediction API] first. # # Open `configs.py` with an editor, and uncomment the definition of `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_AI_PLATFORM_TRAINING_ARGS` and `GCP_AI_PLATFORM_SERVING_ARGS`. 
We will use our custom built container image to train a model in Cloud AI Platform Training, so we should set `masterConfig.imageUri` in `GCP_AI_PLATFORM_TRAINING_ARGS` to the same value as `CUSTOM_TFX_IMAGE` above. # # Open `kubeflow_dag_runner.py` and uncomment `ai_platform_training_args` and `ai_platform_serving_args`. # # Update the pipeline and create a run as we did in step 5 and 6. # !tfx pipeline update \ # --pipeline_path=kubeflow_dag_runner.py \ # --endpoint={ENDPOINT} # !tfx run create --pipeline_name {PIPELINE_NAME} --endpoint={ENDPOINT} # You can find your training jobs in [Cloud AI Platform Jobs](https://console.cloud.google.com/ai-platform/jobs). If your pipeline was completed successfully, you can find your model in [Cloud AI Platform Models](https://console.cloud.google.com/ai-platform/models). # ## Step 10. Ingest YOUR data to the pipeline. # # We made a pipeline for a model using Chicago Taxi dataset. Now it's time to put your data into the pipeline. # Your data can be stored anywhere your pipeline can access including GCS, BigQuery. You need to modify the pipeline definition to accomodate your data. # # 1. If your data is stored in files, modify `DATA_PATH` in `kubeflow_dag_runner.py` or `beam_dag_runner.py` to the location. If your data is stored in BigQuery, modify `BIG_QUERY_QUERY` in configs.py to your query statement. # 1. Add features in `features.py`. # 1. Modify `preprocessing.py` to [transform input data for training](https://www.tensorflow.org/tfx/guide/transform). # 1. Modify `model.py` and `hparams.py` to [describe your ML model](https://www.tensorflow.org/tfx/guide/trainer). # # Please see [Trainer component guide](https://www.tensorflow.org/tfx/guide/trainer) for more introduction. # ## Cleaning up # # To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
# # Alternatively, you can clean up individual resources by visiting each consoles: # - [Google Cloud Storage](https://console.cloud.google.com/storage) # - [Google Container Registry](https://console.cloud.google.com/gcr) # - [Google Kubernetes Engine](https://console.cloud.google.com/kubernetes) #
docs/tutorials/tfx/template_beam.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Cross-validating bayes chime methods
#
# Thanks to <NAME> for explaining the magic of `if __name__ == "__main__":` to me.
#
# This branch changes bayeschime to make it work from python, rather than the
# command line. It also adapts Christian Koerber's implementation to make it
# run from python, rather than the command line, or the jupyter notebooks that
# he originally put together.
#

# cd ..
from bayes_chime.bayeschime_xval import bayes_xval
print(bayes_xval)

# Hard-coding the 7 for the notebook version, to look 7 days out.

# +
# One (horizon, hospital) task per hospital, fanned out across all cores.
tuples_for_starmap = [(7, j) for j in ['PMC', "LGH", "HUP", "CCH", 'PAH', 'MCP']]

import multiprocessing as mp
pool = mp.Pool(mp.cpu_count())
outdicts = pool.starmap(bayes_xval, tuples_for_starmap)
pool.close()
# -

# Show the residual plots

# +
from scipy.stats import probplot
# %pylab inline
import matplotlib.pyplot as plt

def plotr(d, name):
    """Plot hospital/vent quantile traces for one hospital.

    d: dict holding the per-day quantile arrays 'hq_m', 'hq_n' (hospital,
       mcmc vs gaussian) and 'vq_m', 'vq_n' (vent) -- TODO confirm keys
       against bayes_xval's return value.
    name: hospital label used as the figure title.
    """
    fig, ax = plt.subplots(nrows = 1, ncols = 2, figsize = [10,5])
    ax[0].plot(d['hq_m'], label = "mcmc")
    ax[0].plot(d['hq_n'], label = "gaussian")
    ax[0].legend()
    ax[0].set_ylabel("quantile")
    ax[0].set_xlabel("day of past week")
    ax[0].set_title("Hosp")
    ax[0].set_ylim(0,1.1)
    ax[1].plot(d['vq_m'], label = "mcmc")
    ax[1].plot(d['vq_n'], label = "gaussian")
    ax[1].legend()
    ax[1].set_ylabel("quantile")
    ax[1].set_xlabel("day of past week")
    ax[1].set_title("Vent")
    ax[1].set_ylim(0,1.1)
    fig.suptitle(name)
    plt.tight_layout()

def plotq(d, name):
    """Normal Q-Q plots of hospital/vent residuals, mcmc vs gaussian.

    d: dict with residual arrays 'resh_m', 'resh_n', 'resv_m', 'resv_n'.
    name: hospital label used as the figure title.
    """
    fig, ax = plt.subplots(nrows = 2, ncols = 2, figsize = [10,10])
    probplot(d['resh_m'], dist = "norm", plot = ax[0,0])
    ax[0,0].set_title(f"Residuals, hospital, mcmc")
    probplot(d['resh_n'], dist = "norm", plot = ax[0,1])
    ax[0,1].set_title(f"Residuals, hospital, gaussian")
    probplot(d['resv_m'], dist = "norm", plot = ax[1,0])
    ax[1,0].set_title(f"Residuals, vent, mcmc")
    probplot(d['resv_n'], dist = "norm", plot = ax[1,1])
    ax[1,1].set_title(f"Residuals, vent, gaussian")
    fig.suptitle(name)
    plt.tight_layout()
# -

#
for i in outdicts:
    plotr(i['plotr'], i['which_hospital'])

for i in outdicts:
    plotq(i['plotq'], i['which_hospital'])
notebooks/bayesmethod_xval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # discrete_tf - Pythonize
#
# Use Python's OOP to 'pythonize' this model more.
#
# ![](discrete_tf.png)
#
# # Python Setup

import ctypes
import os

from rtwtypes import *


# +
class DiscreteTF(object):
    """ctypes wrapper around a Simulink-generated discrete transfer-function DLL.

    Loads ``<model>_win64.dll``, binds its initialize/step/terminate entry
    points, and exposes the exported globals (OutputSignal, SimTime,
    InputSignal, num, den) as Python properties.
    """

    def __init__(self, model="discrete_tf"):
        """Load the DLL for *model* and bind entry points and signals."""
        self.model = model
        self.dll_path = os.path.abspath(f"{model}_win64.dll")
        self.dll = ctypes.windll.LoadLibrary(self.dll_path)

        # Model entry point functions
        self.initialize = getattr(self.dll, f"{model}_initialize")
        self.init = self.initialize  # short alias
        self.step = getattr(self.dll, f"{model}_step")
        # BUG FIX: the terminate entry point was bound to a local variable and
        # immediately discarded; keep it on the instance so callers can use it.
        self.terminate = getattr(self.dll, f"{model}_terminate")

        # Exported global signals/parameters (real_T comes from rtwtypes).
        self._output = real_T.in_dll(self.dll, 'OutputSignal')
        self._time = real_T.in_dll(self.dll, 'SimTime')
        self._input_signal = real_T.in_dll(self.dll, 'InputSignal')
        self._num = (real_T * 2).in_dll(self.dll, "num")
        self._den = (real_T * 2).in_dll(self.dll, "den")

    # Signals
    @property
    def output(self):
        """Current value of the model's OutputSignal."""
        return float(self._output.value)

    @property
    def time(self):
        """Current simulation time (SimTime)."""
        return float(self._time.value)

    # Parameters
    # BUG FIX: the original getters/setters below read and wrote the
    # module-level global `mdl` instead of `self`, so every instance aliased
    # the parameters of the one global object. Use `self` throughout.
    @property
    def num(self):
        """Numerator coefficients (length-2 list) of the transfer function."""
        return list(self._num)

    @num.setter
    def num(self, value):
        assert len(value) == 2
        self._num[0] = float(value[0])
        self._num[1] = float(value[1])

    @property
    def den(self):
        """Denominator coefficients (length-2 list) of the transfer function."""
        return list(self._den)

    @den.setter
    def den(self, value):
        assert len(value) == 2
        self._den[0] = float(value[0])
        self._den[1] = float(value[1])

    @property
    def input_signal(self):
        """Current value of the model's InputSignal."""
        return float(self._input_signal.value)

    @input_signal.setter
    def input_signal(self, value):
        self._input_signal.value = value

    def __repr__(self):
        return f"{self.model}<{self.time}, {self.input_signal}, {self.output}>"


mdl = DiscreteTF()
mdl.init()
# -

mdl

mdl.step()

mdl
Example2/discrete_tf-python_class.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# # Convergence Check
#
# This notebook reads model estimate files and checks Rhat statistic if
# convergence is given.

# # In sample

# +
model_parameters = dict(additive=['v', 'gamma', 's', 'tau'],
                        GLAM=['v', 'gamma', 's', 'tau'],
                        nobias=['v', 's', 'tau'])

base_dir = os.path.join('results', 'estimates')
epsilon = 0.05  # we'll diagnose convergence if |Rhat - 1| < epsilon
# -


def _collect_convergence(sample_dir, models, model_parameters):
    """Check Rhat convergence for every per-subject estimate CSV.

    Scans ``base_dir/<sample_dir>/<model>/*.csv`` for each model, diagnoses
    convergence as |Rhat - 1| < epsilon for all of that model's parameters
    (suffix '__0_0'), prints a warning for every non-converged fit, and
    returns one row per (subject, model) as a DataFrame.

    Reads the module-level ``base_dir`` and ``epsilon``.

    The in-sample and out-of-sample sections below were previously two
    byte-identical copies of this loop; they now share this helper.
    """
    results = []
    for model in models:
        model_dir = os.path.join(base_dir, sample_dir, model)
        files = [file for file in os.listdir(model_dir) if file.endswith('.csv')]
        parameters = [parameter + '__0_0' for parameter in model_parameters[model]]
        for file in files:
            # File names look like '<prefix>_<subject>_<..>_<..>.csv'.
            _, subject, _, _ = file.split('_')
            subject = int(subject)
            estimates = pd.read_csv(os.path.join(model_dir, file), index_col=0)
            # np.alltrue is deprecated (removed in NumPy 2.0); np.all is the
            # documented equivalent.
            converged = bool(np.all(np.abs(estimates.loc[parameters, 'Rhat'] - 1) < epsilon))
            if not converged:
                print('No convergence for subject {}, {} model.'.format(subject, model))
            results.append(pd.DataFrame(dict(subject=subject, model=model, converged=converged),
                                        index=np.ones(1) * subject))
    return pd.concat(results).sort_values(['subject', 'model']).reset_index(drop=True)


# +
models = ['additive', 'GLAM', 'nobias']
results_ins = _collect_convergence('in_sample', models, model_parameters)

if (results_ins['converged'].mean() == 1.0):
    print('In-sample fits have converged for all participants and models.')
# -

# # Out of sample

# +
model_parameters = dict(additive=['v', 'gamma', 's', 'tau'],
                        multiplicative=['v', 'gamma', 's', 'tau'],
                        nobias=['v', 's', 'tau'])

base_dir = os.path.join('results', 'estimates')
epsilon = 0.05  # we'll diagnose convergence if |Rhat - 1| < epsilon
# -

# +
models = ['multiplicative', 'nobias']
results_oos = _collect_convergence('out_of_sample', models, model_parameters)

if (results_oos['converged'].mean() == 1.0):
    print('Out-of-sample fits have converged for all participants and models.')
# -
SI_0_convergence_check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Mass-spectrometry exploration notebook: reads an HDF5 file produced by
# mzml_loader and uses h5_query helpers to plot spectra, XICs, and RT/MZ
# heat maps around candidate m/z values.
# NOTE(review): this is Python 2 code (print statements); it will not run
# under Python 3 as-is.

import sys
sys.path.append( '/Users/bpb/Data/programming/MetaboliteAtlas/metatlas2p0/metatlas/metatlas' )
import h5_query as h5q
import mzml_loader as mzml_loader
import numpy as np
import tables
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'

mypath = '/Users/bpb/Downloads/20150326_BN6_BN7/'
# mypath = '/Users/bpb/Downloads/'

# +
# One-time conversion of every .mzML file in mypath to HDF5 (kept commented):
# import os
# for file in os.listdir(mypath):
#     if file.endswith(".mzML"):
#         filename = '%s%s' % (mypath,file)
#         mzml_loader.mzml_to_hdf(filename)
#         print filename
# -

# Pick one input file; the alternatives are kept for quick switching.
# myfile = '20141211_caj211_Ecoli_unlabelled_glucose_POS.h5'
# myfile = '2015326_pHILIC_ACN_POS_MSMS_BN7_1062_SAR11_media_WithPolyamines_PPLex_95_Run36.h5'
# myfile = '2015326_pHILIC_ACN_POS_MSMS_BN6_1062_SAR11_media_NoOsmolytes_PPLex_26_Run32.h5'
# myfile = '2015326_pHILIC_ACN_POS_MSMS_BN6_1062_SAR11_media_WithOsmolytes_PPLex_43_Run21.h5'
myfile = '2015326_pHILIC_ACN_POS_MSMS_BN6_7211_SAR11_media_WithOsmolytes_PPLex_53_Run22.h5'
# myfile = '2015326_pHILIC_ACN_NEG_MSMS_BN6_1062_SAR11_media_WithOsmolytes_PPLex_42_Run15.h5'
# myfile = '2015326_pHILIC_ACN_NEG_MSMS_BN6_7211_SAR11_media_WithOsmolytes_PPLex_53_Run22.h5'

filename = '%s%s' % (mypath,myfile)
fid = tables.open_file(filename)

# get total spectrum over RT 12-14 min, MS2, positive polarity
min_rt = 12
max_rt = 14
ms_level = 2
polarity = 1
mz,intensity = h5q.get_spectrogram(fid, min_rt, max_rt, ms_level, polarity,bins=1e6)
idx = np.argsort(intensity)
# Print peaks (intensity > 1) below m/z 140, in increasing intensity order.
for i in idx:
    if intensity[i] > 1:
        if mz[i] < 140:
            print mz[i]
plt.plot(mz,intensity)
plt.xlim(0,140)
plt.show()
# print len(rt)

# get coarse 2d hist of the whole file (MS1); sqrt-compress counts for display
mzEdges = np.linspace(80, 220,500)
rtEdges = np.linspace(2,15,500)
ms_level = 1
polarity = 1
hMap = h5q.get_HeatMapRTMZ(fid,mzEdges,rtEdges,ms_level,polarity)
h5q.plot_heatmap((hMap['arr']+1)**0.5,hMap['rt_bins'],hMap['mz_bins'],title='entire file')
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)

# Base-peak-style trace: max intensity per RT bin.
plt.plot(hMap['rt_bins'],np.max(hMap['arr'],axis=0))
# NOTE(review): missing parentheses — this line references plt.show but does
# not call it.
plt.show

# Candidate theoretical m/z values (adducts/fragments), kept for switching:
# mz_theor = 89.0477 - 1.007276 # sarcosine minus proton
# mz_theor = 89.0477 + 59.013851 # sarcosine plus acetate anion
# mz_theor = 89.0477 + 1.007276 # sarcosine plus proton
# mz_theor = 105.0426 + 1.007276 # serine plus proton
# mz_theor = 105.0426 + 59.013851# serine plus acetate anion
# mz_theor = 149.051024 + 1.007276
# mz_theor = 202.033819322 - 1.007276 #5-hydroxy-2-oxo-4-ureido-2,5-dihydro-1H-imidazole-5-carboxylate
# mz_theor = 132.90533751
mz_theor = 132.904971 #bromomethane plus potassium
print mz_theor
polarity = 1
# Extracted-ion chromatogram in a +/- 5 ppm window around mz_theor.
rt,intensity = h5q.get_XIC(fid, mz_theor - mz_theor*5/1e6, mz_theor + mz_theor*5/1e6, 1, polarity)
plt.plot(rt,intensity)
plt.xlim(10,15)
plt.show()
print len(rt)

# MS2 scans whose precursor is within +/- 100 ppm of mz_theor, RT 12-14 min.
ms_level=2
rt_min = 12
rt_max = 14
data = h5q.get_data(fid,
                    ms_level,
                    polarity,
                    min_mz=0,
                    max_mz=mz_theor+10,
                    min_rt=rt_min,
                    max_rt=rt_max,
                    min_precursor_MZ=mz_theor - mz_theor*100/1e6,
                    max_precursor_MZ=mz_theor + mz_theor*100/1e6)
#                     min_precursor_intensity=0,
#                     max_precursor_intensity=0,
#                     min_collision_energy=0,
#                     max_collision_energy=0)
# Stick plot of the fragment spectrum.
plt.vlines(data['mz'],0,data['i'],color='k',linestyles='solid')
plt.xlabel('m/z')
plt.ylabel('intensity')
# plt.plot(data['mz'],data['i'])
plt.show()

idx = np.argsort(data['i'])
print data['mz'][idx]

# Wide heat map around mz_theor; intensity-weighted m/z centroid check.
mzEdges = np.linspace(mz_theor - 5.5, mz_theor + 5.1,100)
rtEdges = np.linspace(10,16,100)
ms_level = 1
polarity = 1
hMap = h5q.get_HeatMapRTMZ(fid,mzEdges,rtEdges,ms_level,polarity)
h5q.plot_heatmap(hMap['arr'],hMap['rt_bins'],hMap['mz_bins'],title='Maybe Methyl Bromide')
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
mz_centroid = np.sum( np.multiply( np.sum(hMap['arr'],axis=1), hMap['mz_bins']) ) / np.sum( hMap['arr'] )
print "The measured centroid from this 2d hist is %5.5f" % mz_centroid

# Same centroid computation on a narrow (+/- 0.05) window.
# NOTE(review): titles below say 'Methionine' while the wide window above is
# labeled 'Maybe Methyl Bromide' — likely a leftover label; confirm.
mzEdges = np.linspace(mz_theor - 0.05, mz_theor + 0.05, 100)
rtEdges = np.linspace(12,15,100)
ms_level = 1
polarity = 1
hMap = h5q.get_HeatMapRTMZ(fid,mzEdges,rtEdges,ms_level,polarity)
h5q.plot_heatmap(hMap['arr'],hMap['rt_bins'],hMap['mz_bins'],title='Methionine')
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
mz_centroid = np.sum( np.multiply( np.sum(hMap['arr'],axis=1), hMap['mz_bins']) ) / np.sum( hMap['arr'] )
print "The measured centroid from this 2d hist is %5.5f" % mz_centroid

# And again on an even narrower (+/- 0.001) window.
mzEdges = np.linspace(mz_theor - 0.001, mz_theor + 0.001,100)
rtEdges = np.linspace(11,15,100)
ms_level = 1
polarity = 1
hMap = h5q.get_HeatMapRTMZ(fid,mzEdges,rtEdges,ms_level,polarity)
h5q.plot_heatmap(hMap['arr'],hMap['rt_bins'],hMap['mz_bins'],title='Methionine')
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
mz_centroid = np.sum( np.multiply( np.sum(hMap['arr'],axis=1), hMap['mz_bins']) ) / np.sum( hMap['arr'] )
print "The measured centroid from this 2d hist is %5.5f" % mz_centroid

print hMap['arr'].shape
print hMap['mz_bins'].shape
print hMap['rt_bins'].shape
docs/example_notebooks/utilities/molecule hunter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Toy RNN language-model demo on a repeating integer sequence, built with
# MXNet Gluon. Trains an LSTM to predict the next token, then greedily
# decodes a sequence from it.

# +
import math
import os
import time
import sys
import numpy as np
import mxnet as mx
import matplotlib.pyplot as plt
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn

#let's use cpu for now
context = mx.cpu()
# -

# # Test import, ignore

from NLP.src.training import training

# # Create a sequence
# We don't want the sequence to be completely random, otherwise the RNN won't
# be able to learn much
#
# When we got a word sequence, we need to map each unique word to an integer
# and the integer sequence will be fed into the neural net

# +
vocab_size = 50
# 0..49 repeated 2000 times — a perfectly predictable "corpus".
x = mx.np.arange(vocab_size).as_nd_ndarray()
seq = mx.nd.tile(x, reps=2000)
# -

seq

# # DL portion
# ## RNN Model, we can modify this for more complex model structure
# bluntly stolen from https://gluon.mxnet.io/chapter05_recurrent-neural-networks/rnns-gluon.html

class RNNModel(gluon.Block):
    """A model with an encoder, recurrent layer, and a decoder.

    mode selects the recurrent cell ('rnn_relu', 'rnn_tanh', 'lstm', 'gru');
    tie_weights shares the decoder's parameters with the embedding.
    """
    def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer = mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu',
                                   dropout=dropout, input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)
            if tie_weights:
                self.decoder = nn.Dense(vocab_size, in_units = num_hidden,
                                        params = self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units = num_hidden)
            self.num_hidden = num_hidden

    def forward(self, inputs, hidden):
        """Embed inputs, run the RNN, and decode to vocab logits.

        Returns (decoded, hidden) where decoded is flattened to
        (-1, vocab_size) via the num_hidden reshape.
        """
        emb = self.drop(self.encoder(inputs))
        #print(emb)
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(output.reshape((-1, self.num_hidden)))
        return decoded, hidden

    def begin_state(self, *args, **kwargs):
        # Delegate initial-state creation to the wrapped recurrent layer.
        return self.rnn.begin_state(*args, **kwargs)

# ## training loop
# ### auxiliary functions

# +
# change the data sequence to a batched sequence
def batchify(data, batch_size):
    """Reshape data into (num_example, batch_size)"""
    nbatch = data.shape[0] // batch_size
    # Drop the tail that does not fill a whole batch.
    data = data[:nbatch * batch_size]
    data = data.reshape((batch_size, nbatch)).T
    return data

def get_batch(source, i):
    """Return (input row i, target row i+1) — next-step prediction pairs."""
    data = source[i]
    target = source[i + 1]
    return data.reshape((1,len(data))), target.reshape((-1,))

# Compute loss from data_source and the current net
# NOTE(review): `eval` shadows the Python builtin, and this function reads the
# module-level globals `model`, `loss`, `args_batch_size`, `context` — it only
# works after the "Prepare for training" cells below have run.
def eval(data_source):
    total_L = 0.0
    ntotal = 0
    hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx=context)
    for i in range(0, data_source.shape[0] - 1):
        data, target = get_batch(data_source, i)
        output, hidden = model(data, hidden)
        L = loss(output, target)
        total_L += mx.nd.sum(L).asscalar()
        ntotal += L.size
    return total_L / ntotal
# -

# ### training loop

# NOTE(review): like eval(), this uses the module-level globals `model`,
# `trainer`, and `loss`; args_log_interval is accepted but never used.
def train(train_data, args_epochs, args_batch_size, context, args_log_interval=10):
    loss_progress = []
    for epoch in range(args_epochs):
        total_L = 0.0
        hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx = context)
        for ibatch in range(0, train_data.shape[0] - 1):
            data, target = get_batch(train_data, ibatch)
            # Hidden state is carried across batches without being detached
            # from the graph — the original author flagged this too:
            #need this to work, but it doesn't atm
            #hidden = detach(hidden)
            with autograd.record():
                output, hidden = model(data, hidden)
                L = loss(output, target)
                L.backward()
            trainer.step(args_batch_size)
            total_L += mx.nd.sum(L).asscalar()
        # print and record loss every epoch
        epoch_L = total_L / args_batch_size / ibatch
        print('[Epoch %d] loss %.2f' % (epoch + 1, epoch_L), end='\r')
        loss_progress.append(epoch_L)
        total_L = 0.0
    plt.plot(np.arange(args_epochs), loss_progress)
    plt.grid()
    plt.xlabel('Epochs')
    plt.ylabel('Loss')

# ## Prepare for training

# +
#model constants
num_embed = 5
num_hidden = 5
num_layers = 1

#training constants
args_lr = 0.04
args_epochs = 30
args_batch_size = 32
# -

train_data = batchify(seq, args_batch_size).as_in_context(context)

# define model and loss
# we do dropout=0 here to promote overfitting, on real data we might want a positive value
model = RNNModel(mode='lstm', vocab_size=vocab_size, num_embed=num_embed,
                 num_hidden=num_hidden, num_layers=num_layers, dropout=0)
model.collect_params().initialize(mx.init.Xavier(), ctx=context)
trainer = gluon.Trainer(model.collect_params(), 'sgd',
                        {'learning_rate': args_lr, 'momentum': 0, 'wd': 0})
loss = gluon.loss.SoftmaxCrossEntropyLoss()

# # Finally, train the RNN

# finally train the model
train(train_data=train_data, args_epochs=args_epochs,
      args_batch_size=args_batch_size, context=context, args_log_interval=3000)

# # Decoder, use the RNN we just learned to generate a sequence
#
# ## Here we just use a greedy decoder, may do something fancier later

# +
seed = mx.nd.array([[0]])
seq_length = 10
hidden = model.begin_state(func=mx.nd.zeros, batch_size=1, ctx=context)
output_seq = []
# Feed the argmax prediction back in as the next input (greedy decoding).
for i in range(seq_length):
    output, hidden = model(seed, hidden)
    seed = output.argmax().reshape((1,1))
    output_seq.append(seed[0].asscalar())
output_seq
# -

# # Auxiliary cells, don't read

# L2 norm of each embedding row.
embed = model.encoder.weight.data()
norms = (embed**2).sum(axis = 1)
norms = norms.asnumpy()
plt.plot(np.arange(vocab_size), norms)

# +
# Logits for a single step from token 0.
seed = mx.nd.array([[0]])
seq_length = 50
hidden = model.begin_state(func=mx.nd.zeros, batch_size=1, ctx=context)
output, hidden = model(seed, hidden)
plt.plot(np.arange(vocab_size), output[0].asnumpy())
NLP/.ipynb_checkpoints/demo-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Gun Deaths in the US

# ### Gun data

import csv
# NOTE(review): the file handles opened inline here and for census.csv below
# are never closed — fine for a notebook, but use `with open(...)` in a script.
data = list(csv.reader(open("guns.csv","r")))
data[:5]

# ### Headers separated from data into:
# **headers** and **data**

headers = data[0]
data = data[1:]
headers

# #### Dictionary of deaths per year with list comprehension

# Column 1 is the year (see the datetime cell below, which also reads row[1]).
years = [row[1] for row in data]
year_counts = {}
for item in years:
    if item in year_counts:
        year_counts[item] += 1
    else:
        year_counts[item] = 1
year_counts

# #### Date counts using Datetime

import datetime
# Day is fixed to 1, so counts are effectively per (year, month).
dates = [datetime.datetime(year = int(row[1]), month = int(row[2]), day = 1) for row in data]
dates[:5]

date_counts = {}
for item in dates:
    if item in date_counts:
        date_counts[item] += 1
    else:
        date_counts[item] = 1
date_counts

# #### Sex and Race counts following similar procedure

sex = [row[5] for row in data]
race = [row[7] for row in data]
sex_counts = {}
for item in sex:
    if item in sex_counts:
        sex_counts[item] += 1
    else:
        sex_counts[item] = 1
race_counts = {}
for item in race:
    if item in race_counts:
        race_counts[item] += 1
    else:
        race_counts[item] = 1
sex_counts
race_counts

# ### Gathering census data

census = list(csv.reader(open("census.csv" , "r")))
census

# #### Comparing gun data and census data to get a better picture by finding
# gun deaths per 100,000 people of each race

# Map each race label used in the gun data to its census population count
# (columns addressed from the end of census row 1).
mapping = {"Asian/Pacific Islander": int(census[1][-2]) + int(census[1][-3]),
           "Black": int(census[1][-5]),
           "Native American/Native Alaskan": int(census[1][-4]),
           "Hispanic": int(census[1][-6]),
           "White": int(census[1][-7])}
mapping

# Deaths per 100,000 people of each race.
race_per_hunderdk = {}
for race,number in race_counts.items():
    race_per_hunderdk[race] = (number / mapping[race]) * 100000
race_per_hunderdk

# #### Filtering for homicide

intents = [row[3] for row in data]
races = [row[7] for row in data]
homicide_race_counts = {}
# Count only rows whose intent is "Homicide".
for i, race, in enumerate(races):
    if intents[i] == "Homicide":
        if race in homicide_race_counts:
            homicide_race_counts[race] += 1
        else:
            homicide_race_counts[race] = 1
homicide_race_counts

# Homicide deaths per 100,000 people of each race.
homicide_race_per_hundredk = {}
for race, number in homicide_race_counts.items():
    homicide_race_per_hundredk[race] = (number / mapping[race] * 100000)
homicide_race_per_hundredk
Gun Deaths.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CRC Make-A-Thon Crash Course

# ## Let's manipulate text (Called 'Strings' or 'Character Arrays' in programmer speak)

# When you are working with text in Python you enclose the text in quotations.

'I am a string'

# Python does not care about which type of quotation you use.

"I am also a string"

# Try creating a string in the code block below that says something (it can be funny!)



# We can **assign** text (or strings) to a variable

my_awesome_string = 'You guys are great!'

# Note that after you run that block there is no 'Out[#]' block or any output
# at all; if we want to see what is in a variable we have to print()

print(my_awesome_string)

# You can also (in jupyter notebook at least) just put the variable by itself to generate an output

my_awesome_string

# You can make strings any length you want; by convention long strings are
# divided up into lines and formatted neatly.

# Adjacent string literals inside parentheses are concatenated automatically.
long_string = ('This is a story of a well. This well was a special plan by geologist to test his '
               'his idea, he was sure it would work. He sent the points off to his drilling engi'
               'neer, and his manager was excited. Permits in hand they drilled the well. After'
               ' many trials and bits they finally reached TD. There was no oil in the zone the'
               'geologist targeted, fighting back the urge to cry he desparately recommended a'
               ' test of a zone up hole that looked promising. It was a monster, producing thou'
               'sands of barrels a day. He was hailed as a hero for his genius. (And lived happ'
               'ily every after)')

print(long_string)

# You can search in strings and replace parts of it

new_long_string = long_string.replace('well', 'man')
print(new_long_string)

# The new string doesn't make much sense... but I'm sure you can find better
# uses for 'replace' than that

# Try changing the insult below into a compliment

an_insult = 'You are fat'

# You can also swap out individual letters

alphabet_start = 'dbc'
print(alphabet_start)

new_alphabet_start = alphabet_start.replace('d', 'a')
print(new_alphabet_start)

# You can select a character from a string as well by using the variable and
# brackets with the **index** of the character

numbers = '0123456789'
print(numbers[0])
print(numbers[9])

some_letters = 'abcdefghijk'
print(some_letters[0])

# If you try to set a character in a string directly you will get an error:

# This cell fails on purpose — strings are immutable, so item assignment
# raises a TypeError.
numbers[0] = 'a'

# We will cover some ways around this limitation later

# You can pass variables to strings

strong_statement = 'I would never say, "{}."'.format(an_insult)
print(strong_statement)

# You can also pass numbers to strings

number_of_wells = 'There are {} wells.'.format(5)
print(number_of_wells)

# You can also alter the case of text

yell_insult = an_insult.upper()
print(yell_insult)

whisper_insult_sneakily = an_insult.lower()
print(whisper_insult_sneakily)

# You can also handle delimiters like "," or "\t" (tab)

some_numbers = '10,20,30'
split_numbers = some_numbers.split(',')
print(split_numbers)

# Let's Apply all of this by playing with API Numbers
CRC Make A Thon Crash Course Lesson 1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # 행렬의 연산과 성질

# 행렬에는 곱셈, 전치 이외에도 지수 함수 등의 다양한 연산을 정의할 수 있다. 각각의 정의와 성질을 알아보자.

# ## 행렬의 부호

# 행렬은 복수의 실수 값을 가지고 있으므로 행렬 전체의 부호는 정의할 수 없다. 하지만 행렬에서도 실수의 부호 정의와 유사한 기능을 가지는 정의가 존재한다. 바로 행렬의 양-한정(positive definite) 특성이다. (정방행렬에 한정됨)
#
# 쿼드라틱 Form의 결과는 실수값
#
# 모든 실수 공간 $\mathbb{R}^n$ 의 0벡터가 아닌 벡터 $x \in \mathbb{R}^n$ 에 대해 다음 부등식이 성립하면 행렬 $A$ 가 양-한정(positive definite)이라고 한다.
#
# $$ x^T A x > 0 $$
#
# 만약 이 식이 등호를 포함한다면 양-반한정(positive semi-definite)이라고 한다.
#
# $$ x^T A x \geq 0 $$

# 예를 들어 단위 행렬은 양-한정이다.
#
# $$ x^T I x = x^T
# \begin{bmatrix}
# 1&0&\cdots&0\\
# 0&1&\cdots&0\\
# \vdots&\vdots&\ddots&\vdots\\
# 0&0&\cdots&1\\
# \end{bmatrix}
# x
# = x_1^2 + x_2^2 + \cdots + x_n^2 > 0
# $$
#
#
# 다음과 같은 행렬도 양-한정이다.
#
# $$ M = \begin{bmatrix} 2&-1&0\\-1&2&-1\\0&-1&2 \end{bmatrix} $$

# $$
# \begin{align}
# x^{\mathrm{T}}M x
# &= \begin{bmatrix} (2x_1-x_2)&(-x_1+2x_2-x_3)&(-x_2+2x_3) \end{bmatrix} \begin{bmatrix} x_1\\x_2\\x_3 \end{bmatrix} \\
# &= 2{x_1}^2 - 2x_1x_2 + 2{x_2}^2 - 2x_2x_3 + 2{x_3}^2 \\
# &= {x_1}^2+(x_1 - x_2)^{2} + (x_2 - x_3)^{2}+{x_3}^2
# \end{align}
# $$

# ## 행렬의 크기

# 행렬에는 크기 개념과 유사하게 하나의 행렬에 대해 하나의 실수를 대응시키는 norm, 대각 성분(trace), 행렬식(determinant)에 대한 정의가 존재한다.
# ### ํ–‰๋ ฌ Norm # ํ–‰๋ ฌ์˜ norm ์ •์˜๋Š” ๋‹ค์–‘ํ•˜์ง€๋งŒ ๊ทธ ์ค‘ ๋งŽ์ด ์“ฐ์ด๋Š” induced p-norm ์ •์˜๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. ์‹œ๊ทธ๋งˆ2๊ฐœ : ๊ฐ€๋กœ / ์„ธ๋กœ๋กœ ํ–‰๋ ฌํ™” ํ•ด์„œ ๋‹ค ๋”ํ•ด๋ผ. 1/p๋กœ ์ฐจ์›์„ ์ถ•์†Œ # # $$ \Vert A \Vert_p = \left( \sum_{i=1}^m \sum_{j=1}^n |a_{ij}|^p \right)^{1/p} $$ # # ์ด ์ค‘ $p=2$๋Š” ํŠน๋ณ„ํžˆ Frobenius norm ์ด๋ผ๊ณ  ๋ถˆ๋ฆฌ๋ฉฐ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ํ‘œ์‹œํ•œ๋‹ค. # # $$ \Vert A \Vert_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n a_{ij}^2} $$ # NumPy์—์„œ๋Š” linalg ์„œ๋ธŒํŒจํ‚ค์ง€์˜ `norm` ๋ช…๋ น์œผ๋กœ Frobenious norm์„ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋‹ค. A = (np.arange(9) - 4).reshape((3, 3)) A np.linalg.norm(A) # ### ๋Œ€๊ฐ ์„ฑ๋ถ„ # ๋Œ€๊ฐ ์„ฑ๋ถ„(trace) ํ–‰๋ ฌ์˜ ํŠน์„ฑ์„ ๊ฒฐ์ •ํ•˜๋Š” ์ˆซ์ž ์ค‘ ํ•˜๋‚˜๋กœ ์ •๋ฐฉ ํ–‰๋ ฌ(square matrix)์— ๋Œ€ํ•ด์„œ๋งŒ ์ •์˜๋˜๋ฉฐ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ๋Œ€๊ฐ ์„ฑ๋ถ„(diaginal)์˜ ํ•ฉ์œผ๋กœ ๊ณ„์‚ฐ๋œ๋‹ค. # # $$ \operatorname{tr}(A) = a_{11} + a_{22} + \dots + a_{nn}=\sum_{i=1}^{n} a_{ii} $$ # ๋Œ€๊ฐ ์„ฑ๋ถ„์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์„ฑ์งˆ์„ ์ง€๋‹Œ๋‹ค. # # $$ \text{tr} (cA) = c\text{tr} (A) $$ # # $$ \text{tr} (A^T) = \text{tr} (A) $$ # # $$ \text{tr} (A + B) = \text{tr} (A) + \text{tr} (B)$$ # # $$ \text{tr} (AB) = \text{tr} (BA) $$ # # $$ \text{tr} (ABC) = \text{tr} (BCA) = \text{tr} (CAB) $$ # # # ์‹ค์ˆ˜๋Š” ๊ทธ ์ž์ฒด๊ฐ€ trace์ž„ # ํŠนํžˆ ๋งˆ์ง€๋ง‰ ์„ฑ์งˆ์€ trace trick์ด๋ผ๊ณ  ํ•˜์—ฌ ์ด์ฐจ ํ˜•์‹(quadratic form)์˜ ๊ฐ’์„ ๊ตฌํ•˜๋Š”๋ฐ ์œ ์šฉํ•˜๊ฒŒ ์‚ฌ์šฉ๋œ๋‹ค. # # $$ x^TAx = \text{tr}(x^TAx) = \text{tr}(Axx^T) = \text{tr}(xx^TA) $$ # NumPy์—์„œ๋Š” `trace` ๋ช…๋ น์œผ๋กœ trace๋ฅผ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋‹ค. np.trace(np.eye(3)) # ### ํ–‰๋ ฌ์‹ # ์ •๋ฐฉ ํ–‰๋ ฌ $A$์˜ ํ–‰๋ ฌ์‹(determinant) $\det (A)$ ๋Š” Laplace formula๋ผ๊ณ  ๋ถˆ๋ฆฌ๋Š” ์žฌ๊ท€์ ์ธ ๋ฐฉ๋ฒ•์œผ๋กœ ์ •์˜๋œ๋‹ค. # # ์ด ์‹์—์„œ $a_{i,j}$๋Š” $A$์˜ iํ–‰, j์—ด ์›์†Œ์ด๊ณ  $M_{i,j}$์€ ์ •๋ฐฉ ํ–‰๋ ฌ $A$ ์—์„œ iํ–‰๊ณผ j์—ด์„ ์ง€์›Œ์„œ ์–ป์–ด์ง„ ํ–‰๋ ฌ์˜ ํ–‰๋ ฌ์‹์ด๋‹ค. 
# # $$ \det(A) = \sum_{j=1}^n (-1)^{i+j} a_{i,j} M_{i,j} $$ # # ํ–‰๋ ฌ์‹์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์„ฑ์งˆ์„ ๋งŒ์กฑํ•œ๋‹ค. # # # $$ \det(I) = 1 $$ # # $$ \det(A^{\rm T}) = \det(A) $$ # # $$ \det(A^{-1}) = \frac{1}{\det(A)}=\det(A)^{-1} $$ # # $$ \det(AB) = \det(A)\det(B) $$ # # $$ A \in \mathbf{R}^n \;\;\; \rightarrow \;\;\; \det(cA) = c^n\det(A) $$ # # ๋˜ํ•œ ์—ญํ–‰๋ ฌ์€ ํ–‰๋ ฌ์‹๊ณผ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๊ด€๊ณ„๋ฅผ ๊ฐ€์ง„๋‹ค. # # $$ A^{-1} = \dfrac{1}{\det A} M = \dfrac{1}{\det A} # \begin{bmatrix} # M_{1,1}&\cdots&M_{1,n}\\ # \vdots&\ddots&\vdots\\ # M_{n,1}&\cdots&M_{n,n}\\ # \end{bmatrix} # $$ # NumPy์—์„œ๋Š” linalg ์„œ๋ธŒํŒจํ‚ค์ง€์˜ `det` ๋ช…๋ น์œผ๋กœ det๋ฅผ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋‹ค. A = np.array([[1, 2], [3, 4]]) A np.linalg.det(A) # ## ์ „์น˜ ํ–‰๋ ฌ๊ณผ ๋Œ€์นญ ํ–‰๋ ฌ # ์ „์น˜ ์—ฐ์‚ฐ์„ ํ†ตํ•ด์„œ ์–ป์–ด์ง„ ํ–‰๋ ฌ์„ ์ „์น˜ ํ–‰๋ ฌ(transpose matrix)์ด๋ผ๊ณ  ํ•œ๋‹ค. # # $$ [\mathbf{A}^\mathrm{T}]_{ij} = [\mathbf{A}]_{ji} $$ # ๋งŒ์•ฝ ์ „์น˜ ํ–‰๋ ฌ๊ณผ ์›๋ž˜์˜ ํ–‰๋ ฌ์ด ๊ฐ™์œผ๋ฉด ๋Œ€์นญ ํ–‰๋ ฌ(symmetric matrix)์ด๋ผ๊ณ  ํ•œ๋‹ค. # # $$ A^\mathrm{T} = A $$ # ์ „์น˜ ์—ฐ์‚ฐ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์„ฑ์งˆ์„ ๋งŒ์กฑํ•œ๋‹ค. # # $$ ( A^\mathrm{T} ) ^\mathrm{T} = A $$ # # $$ (A+B) ^\mathrm{T} = A^\mathrm{T} + B^\mathrm{T} $$ # # $$ \left( A B \right) ^\mathrm{T} = B^\mathrm{T} A^\mathrm{T} $$ # # $$ \det(A^\mathrm{T}) = \det(A) $$ # # $$ (A^\mathrm{T})^{-1} = (A^{-1})^\mathrm{T} $$ # ## ์ง€์ˆ˜ ํ–‰๋ ฌ # ํ–‰๋ ฌ $A$์— ๋Œ€ํ•ด ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๊ธ‰์ˆ˜๋กœ ๋งŒ๋“ค์–ด์ง€๋Š” ํ–‰๋ ฌ $e^A=\exp A$ ๋ฅผ ์ง€์ˆ˜ ํ–‰๋ ฌ(exponential matrix)์ด๋ผ๊ณ  ํ•œ๋‹ค. # # A๊ฐ€ ํ–‰๋ ฌ์ธ ๊ฒฝ์šฐ์ž„ # # ํ…Œ์ผ๋Ÿฌ ์‹œ๋ฆฌ์ฆˆ. # # $$ e^X = \sum_{k=0}^\infty \dfrac{X^k}{k!} = I + X + \dfrac{1}{2}X^2 + \dfrac{1}{3!}X^3 + \cdots $$ # ์ง€์ˆ˜ ํ–‰๋ ฌ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์„ฑ์งˆ์„ ๋งŒ์กฑํ•œ๋‹ค. 
# # # $$ e^0 = I $$ # # $$ e^{aX} e^{bX} = e^{(a+b)X} $$ # # $$ e^X e^{-X} = I $$ # # $$ XY = YX \;\; \rightarrow \;\; e^Xe^Y = e^Ye^X = e^{X+Y} $$ # ## ๋กœ๊ทธ ํ–‰๋ ฌ # ํ–‰๋ ฌ $A$์— ๋Œ€ํ•ด ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๊ธ‰์ˆ˜๋กœ ๋งŒ๋“ค์–ด์ง€๋Š” ํ–‰๋ ฌ $B=e^A$ ๊ฐ€ ์กด์žฌํ•  ๋•Œ, $A$๋ฅผ $B$์— ๋Œ€ํ•œ ๋กœ๊ทธ ํ–‰๋ ฌ์ด๋ผ๊ณ  ํ•˜๊ณ  ๋‹ค์Œ๊ณผ ๊ฐ™์ด ํ‘œ๊ธฐํ•œ๋‹ค. # # $$ A = \log B $$ # ๋กœ๊ทธ ํ–‰๋ ฌ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์„ฑ์งˆ์€ ๋งŒ์กฑํ•œ๋‹ค. # # ๋งŒ์•ฝ ํ–‰๋ ฌ $A$, $B$๊ฐ€ ๋ชจ๋‘ ์–‘-ํ•œ์ •(positive definite)์ด๊ณ  $AB=BA$์ด๋ฉด # # $$ AB = e^{\ln(A)+\ln(B)} $$ # # ๋งŒ์•ฝ ํ–‰๋ ฌ $A$์˜ ์—ญํ–‰๋ ฌ์ด ์กด์žฌํ•˜๋ฉด # # $$ A^{-1} = e^{-\ln(A)} $$ # # ์ง€์ˆ˜ ํ–‰๋ ฌ์ด๋‚˜ ๋กœ๊ทธ ํ–‰๋ ฌ์€ NumPy์—์„œ ๊ณ„์‚ฐํ•  ์ˆ˜ ์—†๋‹ค. SciPy์˜ linalg ์„œ๋ธŒํŒจํ‚ค์ง€์˜ `expm`, `logm` ๋ช…๋ น์„ ์‚ฌ์šฉํ•œ๋‹ค. A = np.array([[1.0, 3.0], [1.0, 4.0]]) A B = sp.linalg.logm(A) B sp.linalg.expm(B)
06. ๊ธฐ์ดˆ ์„ ํ˜•๋Œ€์ˆ˜/05. ํ–‰๋ ฌ์˜ ์—ฐ์‚ฐ๊ณผ ์„ฑ์งˆ.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ls

# ID -> label dictionary: each line of the dictionary file holds an ID and a
# label. Use a context manager so the file handle is closed deterministically
# (the original bare open(...) leaked the handle).
with open('aksw-bib_dictionary.txt', 'r') as f:
    dictionary = {l.split()[0].strip(): l.split()[1].strip() for l in f}


def _print_triples(results, dictionary, start=13, stop=20):
    """Pretty-print triples results[start:stop] with labels from dictionary.

    BUG FIX: the original computed the index with results.index(triple),
    which is O(n) per triple and, worse, reports the FIRST occurrence of the
    triple — the printed index was wrong whenever a triple appeared more
    than once. Enumerating the slice gives the true position in O(1).
    """
    for idx, (h, r, t) in enumerate(results[start:stop], start=start):
        print(f'triple {idx}:')
        print(f'\th: {dictionary[h]}\n\tr: {dictionary[r]}\n\tt: {dictionary[t]}')


# # Results

# output
with open('aksw-bib_output.txt', 'r') as f:
    results = [(x.split()[0], x.split()[1], x.split()[2]) for x in f]
_print_triples(results, dictionary)

# test_output
with open('aksw-bib-test_output.txt', 'r') as f:
    results = [(x.split()[0], x.split()[1], x.split()[2]) for x in f]
_print_triples(results, dictionary)
misc/explore_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Assignment 2
#
# Before working on this assignment please read these instructions fully. In
# the submission area, you will notice that you can click the link to
# **Preview the Grading** for each step of the assignment. This is the
# criteria that will be used for peer grading. Please familiarize yourself
# with the criteria before beginning the assignment.
#
# An NOAA dataset has been stored in the file
# `data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv`.
# This is the dataset to use for this assignment. Note: The data for this
# assignment comes from a subset of The National Centers for Environmental
# Information (NCEI)
# [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt)
# (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from
# thousands of land surface stations across the globe.
#
# Each row in the assignment datafile corresponds to a single observation.
#
# The following variables are provided to you:
#
# * **id** : station identification code
# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)
# * **element** : indicator of element type
#     * TMAX : Maximum temperature (tenths of degrees C)
#     * TMIN : Minimum temperature (tenths of degrees C)
# * **value** : data value for element (tenths of degrees C)
#
# For this assignment, you must:
#
# 1. Read the documentation and familiarize yourself with the dataset, then
#    write some python code which returns a line graph of the record high and
#    record low temperatures by day of the year over the period 2005-2014.
#    The area between the record high and record low temperatures for each
#    day should be shaded.
# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for
#    which the ten year record (2005-2014) record high or record low was
#    broken in 2015.
# 3. Watch out for leap days (i.e. February 29th), it is reasonable to remove
#    these points from the dataset for the purpose of this visualization.
# 4. Make the visual nice! Leverage principles from the first module in this
#    course when developing your solution. Consider issues such as legends,
#    labels, and chart junk.
#
# The data you have been given is near **Ann Arbor, Michigan, United
# States**, and the stations the data comes from are shown on the map below.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
# %matplotlib notebook
# %matplotlib inline

data = pd.read_csv('data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')

# Tenths of degrees C -> degrees C.
data['Data_Value'] = data['Data_Value']*0.1
data['Date'] = pd.to_datetime(data['Date'])
data['Year'] = data['Date'].dt.year
data['Month_Day'] = data['Date'].dt.strftime('%m-%d')
# Drop leap days (assignment step 3).
data = data[data['Month_Day']!='02-29']

# 2005-2014 record high/low per calendar day (365 values each).
max_temp = data[(data.Year >= 2005) & (data.Year < 2015) & (data['Element'] == 'TMAX')].groupby(['Month_Day'])['Data_Value'].max()
min_temp = data[(data.Year >= 2005) & (data.Year < 2015) & (data['Element'] == 'TMIN')].groupby(['Month_Day'])['Data_Value'].min()

# Attach the day's record columns to every observation so 2015 rows can be
# compared against them.
data = data.merge(max_temp.reset_index(drop=False).rename(columns={'Data_Value':'Max_temp'}), on='Month_Day', how='left')
data = data.merge(min_temp.reset_index(drop=False).rename(columns={'Data_Value':'Min_temp'}), on='Month_Day', how='left')

# 2015 observations that broke the ten-year record (assignment step 2).
record_high = data[(data.Year==2015)&(data.Data_Value > data.Max_temp)]
record_low = data[(data.Year==2015)&(data.Data_Value < data.Min_temp)]

# One x position per day of 2015 (365 days, matching the leap-day-free records).
date_index = np.arange('2015-01-01','2016-01-01', dtype='datetime64[D]')

plt.figure()
plt.plot(date_index,max_temp,color='lightcoral', linewidth=1)
plt.plot(date_index,min_temp,color='skyblue', linewidth=1)
plt.scatter(record_high.Date.values, record_high.Data_Value.values, color='red', s=8)
plt.scatter(record_low.Date.values, record_low.Data_Value.values, color='blue', s=8)

ax = plt.gca()
ax.axis(['2015/01/01','2015/12/31',-50,50])
plt.xlabel('Date', fontsize=10)
# NOTE(review): 'ยฐ' looks like a mis-encoded degree sign ('°') — confirm the
# file's encoding and fix the label at the source.
plt.ylabel('ยฐ Celsius', fontsize=10)
plt.title('Temperature in Ann Arbour, Michigan (2005-2015)', fontsize=12)
plt.legend(['Record high (2005-2014)','Record low (2005-2014)','Record breaking high in 2015','Record breaking low in 2015'],loc=0,frameon=False)
# Shade between the record high and low lines (assignment step 1).
ax.fill_between(date_index, max_temp, min_temp, facecolor='grey', alpha=0.25)

# Month ticks: major ticks (hidden labels) at month starts, minor ticks with
# month-name labels centered on the 15th, tick marks themselves suppressed.
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_minor_locator(dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
for tick in ax.xaxis.get_minor_ticks():
    tick.tick1line.set_markersize(0)
    tick.label1.set_horizontalalignment('center')
# -
Applied Plotting Charting and Data Representation in Python/Assignment2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="U10NaJ1Ng-RC"
import numpy as np
import matplotlib.pyplot as plt
import librosa
import soundfile as sf
import librosa.display
from glob import glob
import os
from tqdm import tqdm

# + id="0MeoFOrZhNiY"
def splitData(X, t, testFraction=0.2, randomize = False):
    """
    Split the data into training and test sets.
    Use numpy functions only.

    Inputs:
        X: (np array of len Nsamples) input feature vectors
        t: (np array of len Nsamples) targets; one hot vectors
        testFraction: (float) Nsamples_test = testFraction * Nsamples
        randomize: (bool) if False a fixed seed (42) is used so the split
            is reproducible across runs; if True the split changes each call
    Outputs (note the order):
        X_train: training set
        t_train: training labels
        X_test: test set
        t_test: test labels
    """
    if randomize is False:
        # Fixed seed -> deterministic permutation.
        tot_samples = np.random.RandomState(seed=42).permutation(len(X))
    else:
        tot_samples = np.random.permutation(len(X))
    X_train = []
    X_test = []
    t_train = []
    t_test = []
    # At least one test sample even for tiny datasets.
    test_samples = max(1,int(len(X)*testFraction))
    # The first `test_samples` permuted indices form the test set,
    # the remainder the training set.
    for i in range(test_samples):
        X_test.append(X[tot_samples[i]])
        t_test.append(t[tot_samples[i]])
    for i in range(test_samples,len(X)):
        X_train.append(X[tot_samples[i]])
        t_train.append(t[tot_samples[i]])
    return np.asarray(X_train), np.asarray(t_train), np.asarray(X_test), np.asarray(t_test)

def calc_spec(x):
    """Compute a log-power (dB) magnitude spectrogram of one audio clip."""
    n_fft = 1024
    hop_length = 512
    win_length = 1024
    X = np.abs(librosa.stft(x, n_fft = n_fft, hop_length = hop_length, win_length = win_length, window='hann'))
    # Power spectrogram in dB, referenced to the per-clip maximum.
    X = librosa.power_to_db(X**2,ref=np.max)
    return X

def audio2spec(x, norm=True):
    '''
    Compute a spectrogram for every clip in x.

    Inputs:
        x: iterable of 1-D np arrays (audio samples)
        norm: if True, standardise the whole batch to zero mean / unit std
    Output:
        X: (np array) spectrogram sequence
    '''
    X=[]
    for sample in x:
        X.append(calc_spec(sample))
    if norm is True:
        # Normalisation uses batch-wide statistics, not per-clip ones.
        X = (X-np.mean(X))/np.std(X)
    return np.asarray(X)

# + id="YPy_T57fhY5-"
def computeCM(y, y_hat):
    '''
    Compute a 2x2 confusion matrix, treating index 0 of the one-hot
    vectors (the "speech" class) as the positive class.

    Inputs:
        y = labels (one-hot vectors)
        y_hat = predicted output (one-hot vectors)
    Output:
        confusion matrix as np array [[tp, fp], [fn, tn]]
    '''
    tp=0
    tn=0
    fp=0
    fn=0
    for i in range(len(y)):
        if y[i][0] == 1 and y_hat[i][0] == 1:
            tp+=1
        elif y[i][0] == 0 and y_hat[i][0] == 0:
            tn+=1
        elif y[i][0] == 1 and y_hat[i][0] == 0:
            fn+=1
        # FIX: the original compared the whole one-hot vector to 1
        # (`y_hat[i] == 1`), which is never truthy as intended, so false
        # positives were never counted.
        elif y[i][0] == 0 and y_hat[i][0] == 1:
            fp+=1
    confusion_matrix = [[tp,fp],[fn,tn]]
    return np.asarray(confusion_matrix)

# + id="lFa4MEUjl8jg"
# !pip install noisereduce

# + id="kpMsywQfl41b"
# NOTE(review): `import glob` below shadows the earlier `from glob import glob`,
# turning the name `glob` back into the module object.
import librosa
import os
import noisereduce as nr
import glob
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
from collections import defaultdict
import soundfile as sf
from tqdm import tqdm

# + id="7bW_kevKlsSh"
def is_overlap(cstart_time, cend_time, start_time, end_time):
    """
    Return True when the [start_time, end_time] segment overlaps the
    annotated [cstart_time, cend_time] interval "enough".

    NOTE(review): the 0.50 * (start_time - cend_time) threshold is usually
    negative when the segments overlap, which makes the partial-overlap
    branches almost always return True — confirm the intended threshold.
    """
    if(cstart_time > end_time):
        return False
    if(cend_time < start_time):
        return False
    if(start_time > cstart_time and end_time < cend_time ):
        return True
    if(start_time > cstart_time):
        overlap = start_time - cstart_time
        if(overlap > 0.50 * (start_time - cend_time)):
            return True
        else:
            return False
    if(end_time < cend_time):
        overlap = cend_time - end_time
        if(overlap > 0.50 * (start_time - cend_time)):
            return True
        else:
            return False
    return True

def one_hot_enc(cl_name):
    """Map a class name to its one-hot vector: [speech, music, silence]."""
    if(cl_name == 'music'):
        return np.array([0,1,0])
    elif(cl_name == 'speech'):
        return np.array([1,0,0])
    else:
        return np.array([0,0,1])

def load_audio(filename, cls, Fs = 16000):
    """
    Load one wav file, denoise it, cut it into N bins of SPLIT_SIZE
    samples, and assign each bin a one-hot label from the annotations.

    Inputs:
        filename: path to the wav file
        cls: list of (onset, offset, class_name) annotation tuples
        Fs: sampling rate
    Outputs:
        labels: (N, 3) one-hot labels per bin
        x: (N, SPLIT_SIZE) audio bins
    NOTE(review): is_overlap is called with bin positions expressed as
    fractions (index*SPLIT_SIZE/K) while the annotations presumably carry
    onset/offset in seconds — verify the units agree.
    """
    x, _ = librosa.load(filename, sr=Fs)
    x = nr.reduce_noise(x, Fs)
    # Keep the first K samples and reshape into fixed-size bins.
    x = np.reshape(x[:K], (-1,SPLIT_SIZE))
    labels = np.zeros((N,3))
    for index, sub_audios in enumerate(x):
        class_assigned = False
        for tup in enumerate(cls):
            if(is_overlap(tup[1][0], tup[1][1], index*SPLIT_SIZE/K, (index+1)*SPLIT_SIZE/K)):
                labels[index] = one_hot_enc(tup[1][2])
                class_assigned = True
                break
        # Bins matching no annotation are labelled silence.
        if(class_assigned == False):
            labels[index] = one_hot_enc('silence')
    return labels, x

def load_data(foldername, Fs=16000):
    '''
    Load every wav file under foldername/wav with the annotations from
    foldername/labels.csv, split into fixed-size bins.

    Inputs:
        foldername: (str) folder containing wav/ and labels.csv
        Fs: (int) sampling rate
    Outputs:
        labels_final: (N * n_files, 3) one-hot labels
        data_final: (N * n_files, SPLIT_SIZE) audio bins
    '''
    files = os.listdir(foldername+'/wav')
    df = pd.read_csv (foldername+'/labels.csv', usecols=['filename','onset','offset','class'])
    print(df.head())
    labels_data = df.to_numpy()
    data = dict()
    # Group annotations per filename.
    labels = defaultdict(list)
    for entry in labels_data:
        labels[entry[0]].append((entry[1],entry[2],entry[3]))
    labels_list = []
    data_list = []
    for wav_file in tqdm(files):
        # wav_file[:-4] strips the ".wav" suffix to match the csv keys.
        ls, audio = load_audio(foldername+"/wav/"+wav_file, labels[wav_file[:-4]], Fs)
        labels_list.append(ls)
        data_list.append(audio)
    labels_final = np.array(labels_list)
    data_final = np.array(data_list)
    # Flatten (n_files, N, ...) into one bin-per-row array.
    labels_final = labels_final.reshape(N*len(files), 3)
    data_final = data_final.reshape(N*len(files), SPLIT_SIZE)
    return labels_final, data_final

def reverse_one_hot(ohv):
    """Inverse of one_hot_enc: one-hot vector -> class name."""
    if(ohv[1] == 1):
        return "music"
    elif(ohv[0] == 1):
        return "speech"
    else:
        return "silence"

def save_data(train_labels, train_audios):
    """Write every audio bin to disk as a wav file (hard-coded Drive path)."""
    for i in range(len(train_audios)):
        print(reverse_one_hot(train_labels[i]))
        sf.write('/content/drive/MyDrive/Sem 5/EE603/project/val_set/split_wavs/'+str(i)+'.wav', train_audios[i], 16000)

def get_mfcc(train_audios, n_mfcc=20, Fs=16000):
    """Compute MFCC features for every audio bin."""
    mfccs=[]
    for audio in tqdm(train_audios):
        mfccs.append(librosa.feature.mfcc(audio, n_mfcc=n_mfcc, sr=Fs, n_fft=512))
    mfccs = np.array(mfccs)
    return mfccs

# + id="FTOiJLIAltMB"
# Global bin geometry used by load_audio/load_data:
#   K = number of kept samples per file (rounded down to a SPLIT_SIZE multiple)
#   N = number of bins per file
SAMPLING_RATE = 16000
AUDIO_DURATION = 10 #in seconds
FRAME_ACCURACY = 0.99
SPLIT_SIZE = 1000
MAX_SAMPLES = 0
K = int(AUDIO_DURATION*SAMPLING_RATE*FRAME_ACCURACY/SPLIT_SIZE)*SPLIT_SIZE
N = int(K/SPLIT_SIZE)

# + colab={"base_uri": "https://localhost:8080/"} id="1XUHniXSlvXa" executionInfo={"status": "ok", "timestamp": 1636749474409, "user_tz": -330, "elapsed": 16607, "user": {"displayName": "<NAME>", "photoUrl":
"https://lh3.googleusercontent.com/a-/AOh14Ghzq9zlJtl-Oe29ULdfPQjb7bTMJy0SAXakH_QV5A=s64", "userId": "08884573075132776173"}} outputId="3a6163cc-61f4-4c89-be6c-ff903ab2555e" train_labels, train_audios = load_data('/content/drive/MyDrive/Sem 5/EE603/project/val_set') # + colab={"base_uri": "https://localhost:8080/"} id="ycV0RE9v3t1V" executionInfo={"status": "ok", "timestamp": 1636749501937, "user_tz": -330, "elapsed": 399, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzq9zlJtl-Oe29ULdfPQjb7bTMJy0SAXakH_QV5A=s64", "userId": "08884573075132776173"}} outputId="cd950ab8-eb27-45cf-a8ba-cdfa1e1db925" print(np.shape(train_labels), type(train_labels)) print(np.shape(train_audios), type(train_audios)) # + id="EveK9UNHhXXH" class Classifier: ''' Create a linear classifier to classify each frame ''' def __init__(self,lr=10**-2,epoch=10**4): self.W = -1 self.b = 0 self.lr = lr self.epoch = epoch def softMax(self,z): z = np.exp(z - np.max(z)) return z/np.sum(z) def train(self,x_train, y_train): ''' Train the linear classifier Inputs: x_train: training set y_train: training labels Output: None ''' print("Received input data and labels of shapes:",np.shape(x_train), np.shape(y_train)) n_files = np.shape(x_train)[0] n_features = np.shape(x_train)[1] n_frames = np.shape(x_train)[2] n_classes = np.shape(y_train)[1] m = n_files print(n_files, "n_files") print(n_frames, "n_frames") print(n_features, "n_features") print(n_classes, "n_classes") self.W = np.random.RandomState(seed=42).random((n_features, n_classes)) self.b = np.random.RandomState(seed=42).random(n_classes) print("Initialised weights and bias with shapes:", np.shape(self.W), np.shape(self.b)) X = [] Y = [] losses = [] for i in range(n_files): for j in range(n_frames): X.append(x_train[i][:,j]) Y.append(y_train[i]) X = np.asarray(X) Y = np.asarray(Y) print("final shapes of data fed to training:",np.shape(X),np.shape(Y)) for i in tqdm(range(self.epoch)): Z = X@self.W + self.b 
Y_hat = self.softMax(Z) w_grad = (1/m)*np.dot(X.T, (Y_hat - Y)) b_grad = (1/m)*np.sum(Y_hat - Y) self.W -= self.lr*w_grad self.b -= self.lr*b_grad # loss = (-1/m)*np.sum(np.log(Y_hat+1e-9)*Y ) loss = -np.mean(np.log(1e-10+Y_hat[np.arange(len(Y)), np.argmax(Y, axis=1)])) losses.append(loss) return losses def save_model(self, save_path): ''' Save the trained model on local disk Input: save_path: location at which model is to be saved Output: None ''' with open(os.path.join(save_path,'W.npy'), 'wb') as f: np.save(f,self.W) with open(os.path.join(save_path,'b.npy'), 'wb') as f: np.save(f,self.b) def load_model(self, load_path): ''' Save the trained model on local disk Input: load_path: location from which model is to be loaded Output: None ''' with open(os.path.join(load_path,'W.npy'), 'rb') as f: self.W = np.load(f) with open(os.path.join(load_path,'b.npy'), 'rb') as f: self.b = np.load(f) def predict_framewise(self,x_test): ''' Framewise classification (speech or music) Input: x_test: single frame (n_features,1) Output: y_pred_framewise = class prediction ''' z = x_test@self.W + self.b y_hat = np.argmax(self.softMax(z)) y_hot = np.zeros(np.shape(z)[0]) y_hot[y_hat] = 1 return y_hot def predict_aggregate(self,y_pred_framewise): ''' Aggregate frames to give a single class label (music or speech) to the entire audio file Input: y_pred_framewise: framewise prediction Output: y_hat: frame aggregate (one-hot vectors) ''' y_pred_framewise = np.asarray(y_pred_framewise) if y_pred_framewise.ndim > 1: counts = np.sum(y_pred_framewise, axis=0) y_hat = np.argmax(counts) y_hot = np.zeros(np.shape(counts)[0]) y_hot[y_hat] = 1 return y_hot def predict(self,x_test): ''' Return one hot encoded classification results for the test set Input: x_test: testing set (normalised spectrograms) Output: y_hat: list of one hot vectors for classification on x_test ''' y_hat = [] for f in x_test: y_pred_framewise = [] for window in range(np.shape(f)[1]): 
y_pred_framewise.append(self.predict_framewise(f[:,window])) y_hat.append(self.predict_aggregate(y_pred_framewise)) return y_hat # + colab={"base_uri": "https://localhost:8080/", "height": 564} id="Ri1VTSEImyc_" executionInfo={"status": "ok", "timestamp": 1636750174789, "user_tz": -330, "elapsed": 40454, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghzq9zlJtl-Oe29ULdfPQjb7bTMJy0SAXakH_QV5A=s64", "userId": "08884573075132776173"}} outputId="e7839aa1-f234-4b3b-fb94-76376eeb66a6" EPOCHS = 10**3 LR = 10**-2 TESTFRACTION = 0.2 # train_audios_mfcc = audio2spec(train_audios) # print(np.shape(train_audios_mfcc), type(train_audios_mfcc)) # train_audios_mfcc = np.reshape(train_audios_mfcc, (np.shape(train_audios_mfcc)[0],-1,1)) # print(np.shape(train_audios_mfcc), type(train_audios_mfcc)) X = train_audios Y = train_labels X_train, t_train, X_test, t_test = splitData(X=X, t=Y, testFraction=TESTFRACTION, randomize=True) print("check shapes after train test split") print(np.shape(X_train), np.shape(t_train)) print(np.shape(X_test), np.shape(t_test)) # TRAINING X_train = audio2spec(x=X_train, norm=True) X_train = np.reshape(X_train, (np.shape(X_train)[0],-1,1)) print("training") model = Classifier(lr=LR,epoch=EPOCHS) hist=model.train(x_train=X_train, y_train=t_train) #PLOTTING plt.plot(np.arange(EPOCHS),hist) plt.title("Loss vs Epoch at LR = "+str(LR)) plt.xlabel("Epoch") plt.ylabel("Loss") plt.show() X_test = audio2spec(x=X_test, norm=True) X_test = np.reshape(X_test, (np.shape(X_test)[0],-1,1)) y_hat = model.predict(x_test=X_test) # # EVALUATION METRICS # confusion_matrix = computeCM(y=t_test, y_hat=y_hat) # plt.matshow(confusion_matrix, cmap='gray') # plt.title("Confusion Matrix") # plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="xpx5weCHnPTw" executionInfo={"status": "ok", "timestamp": 1636750181648, "user_tz": -330, "elapsed": 413, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14Ghzq9zlJtl-Oe29ULdfPQjb7bTMJy0SAXakH_QV5A=s64", "userId": "08884573075132776173"}} outputId="6de8193a-e216-4178-a35e-38b6ea481d2a" test_accuracy = np.mean(np.asarray(y_hat).ravel() == np.asarray(t_test).ravel()) * 100 # test_accuracy = classifier.score(X_test, y_test) print('Test accuracy:', test_accuracy) # + id="J5JNJlhEhd1m" model.save_model('') # !ls model.load_model('') print(model.W) print(model.b) print(np.shape(model.W)) print(np.shape(model.b))
OldTestCodes/LogReg.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jongchurlwon/github_test/blob/main/WON.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="93bEX3OekRpC" outputId="a5136c33-e940-4169-a2f9-8c58d1ba361d"
# !pip install pyupbit

# + id="UZIEl5LLm3ju"
import pyupbit

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="C2v3qLXgHkH6" outputId="89299565-0cde-4262-8062-a7844e1efc15"
# Fetch the most recent ~200 hourly candles from Upbit.
# (The original comment said BTC, but the ticker actually queried is KRW-ETH.)
df = pyupbit.get_ohlcv("KRW-ETH", interval="minute60")
df

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="lPQ0GRg2HocC" outputId="c5fc94f0-be4d-4b97-9441-260e60c010fe"
# Keep only timestamp (ds) and closing price (y) — the column names Prophet expects.
df = df.reset_index()
df['ds'] = df['index']
df['y'] = df['close']
data = df[['ds','y']]
data

# + id="480FcJxkHsbS"
# Import Prophet.
from fbprophet import Prophet

# + colab={"base_uri": "https://localhost:8080/"} id="6lH8UivhH2F7" outputId="ee6977cc-0dda-44cc-dd5c-9d0ee012ad6c"
# Fit the model on the hourly closes.
model = Prophet()
model.fit(data)

# + id="Vj7MoiwlH5Ky"
# Forecast 24 hours into the future.
future = model.make_future_dataframe(periods=24, freq='H')
forecast = model.predict(future)

# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="8sk521bvH71a" outputId="536d97a5-9f64-4aff-e168-c3bd74bf4ef5"
# Plot 1: forecast with uncertainty interval.
fig1 = model.plot(forecast)

# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="36XBgtS8IAQy" outputId="7b3b706e-f459-47a0-b5c2-e447442ffd86"
# Plot 2: trend/seasonality components.
fig2 = model.plot_components(forecast)

# + colab={"base_uri": "https://localhost:8080/"} id="u6FImXp8IGHq" outputId="e45f5eea-4fec-4420-a5e4-7ce9443da705"
# Current (buy-time) price.
nowValue = pyupbit.get_current_price("KRW-ETH")
nowValue

# + colab={"base_uri": "https://localhost:8080/"} id="8xptR4I3IK1a" outputId="f8cdbc28-701d-4923-c43f-4920f7166795"
# Look up the predicted closing price for the day.
# First try: last forecast timestamp with the hour replaced by 09:00
# (presumably the daily close boundary on this exchange — TODO confirm
# the timezone/close-hour convention).
closeDf = forecast[forecast['ds'] == forecast.iloc[-1]['ds'].replace(hour=9)]
# Fallback (original comment: "current time is past midnight"): use the
# last *observed* timestamp at 09:00 instead.
if len(closeDf) == 0:
    closeDf = forecast[forecast['ds'] == data.iloc[-1]['ds'].replace(hour=9)]
# Either way, this is the predicted close for the day.
closeValue = closeDf['yhat'].values[0]
closeValue

# + colab={"base_uri": "https://localhost:8080/"} id="danZrBXEInjK" outputId="f6cbf58a-f960-474f-c467-5b9641fc86c1"
# Print both prices (labels are Korean: "current price" / "closing price").
print("ํ˜„์žฌ ์‹œ์  ๊ฐ€๊ฒฉ: ", nowValue)
print("์ข…๊ฐ€์˜ ๊ฐ€๊ฒฉ: ", closeValue)

# + colab={"base_uri": "https://localhost:8080/", "height": 779} id="TATQ-bEmIssK" outputId="a80314ad-b2ec-4a59-a66b-12f7e378ea8c"
forecast
WON.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.display import YouTubeVideo # # Quellen # # Die folgenden Erlรคuterungen beschreiben Notizen zu dem Video: YouTubeVideo('BUNl0To1IVw', width=300, height=200) # Die Darstellungen innerhalb dieses Notebooks sind ebenfalls aus diesem entnommen. # In diesem Rahmen mรถchte ich mich fรผr die Arbeit der folgenden Referenz bedanken # und diese als Quelle aller Inhalte dieses Notebooks benennen: # # ยฉ <NAME> and <NAME> # MIT 6.S191: Introduction to Deep Learning # IntroToDeepLearning.com # # 1. Generative modeling # # * Beschreibt einen Bereich des Unsupervised Learnings # * Ziel: Nimmt als Trainingsdaten Samples von einer Verteilung und lernt ein Modell, dass die Verteilung reprรคsentiert # * Use-Cases: # > * Density Estimation # > * Sample Generation # # ### 1.1. Vorteile generativer Modelle # > * Erkennen der dem Datensatz zugrundeliegenden Struktur (Verteilung) # > * Erzeugen eines unbaised Datensatzes # > * Outlier Detection (Durch Werte die Stark von der gelernten Verteilung abweichung) # ### 1.2. Latent variable models # # Latent variable models, sind eine Subgruppe des generative modelings. # Sie kรถnnen verwendet werden, um die latenten Faktoren zu erlernen, die einer Verteilung zugrunde liegen. # # Beispiele fรผr Arten von latent variable models sind: # * Generative adversarial networks # * Variational Autoencoders # # # 2. Autoencoders # # Autoencoders beschreiber einen Ansatz, um eine niederdimensionale Feature Reprรคsentation von unlabelled Daten zu erlernen. # Dabei erzeugen sie eine Transformation, die, รคhnlich wie die PCA den Datenraum reduziert und dennoch versucht die Kerninformationen in diesem zu erhalten. 
# # Vorteile die aus dem Erlernen einer solchen Reprรคsentation entstehen sind die folgenden: # > * Kompression der Daten # > * Verringern der Dimensionalitรคt $\rightarrow$ Lernprozess effektivieren # # Diese Modelle bestehen aus zwei Komponenten, welche wรคhrend der Trainingsphase gelernt werden. Diese sind: # 1. Encoder # 2. Decoder # # ### 2.1. Encoder # # Der Encoder lernt ein Mapping der Input-Daten $x$ auf einen niederdimensionalen latenten Raum $z$. Erreicht wird dies durch mehrere Layer, welche jeweils, eine geringere Dimensionalitรคt aufweisen, wie das davor liegend Layer. Daraus entsteht ein Modell, was dem Aufbau der folgenden Darstellung รคhnelt: # # # <img src="./bilder/encoder.PNG" alt="encoder" width="400" style="background-color:white;"/> # # ### 2.2. Decoder # # Der Decoder beschreibt ein Modell, das lernt die ursprรผnglichen Input-Werte, aus dem Output $z$ des Encoders zu rekonstruieren $\hat{x}$. Dabei ist der Aufbau des Modells gegenteilig zu dem des Encoders und hat im Output-Layer die gleiche Dimensionalitรคt, wie $x$, wie der folgenden Abbildung entnommen werden kann: # # <img src="./bilder/decoder.PNG" alt="decoder" width="400" style="background-color:white;"/> # # ### 2.3. Wofรผr der Decoder? # # Wรคhrend des Trainingsprozess wird der Erfolg des Encoders durch die Fรคhigkeit des Models beschrieben, die Daten so zu transformieren, dass der Output des Encoders $z$ eine interpretierbare Reprรคsentation von $x$ darstellt. Mittels des Decoders kann nun รผberprรผft werden, ob dies zutrifft. # Dies geschieht durch den MSE zwischen $x$ und $\hat{x}$: # # <img src="./bilder/autoencoder.PNG" alt="autoencoder" width="600" style="background-color:white;"/> # # # # 3. Variational Autoencoders # # Unterscheidet sich zu "normalen" Autoencodern dadurch, dass hier kein Mapping auf einen niederdimensionale Vektor $z$ entsteht, sondern ein Mapping auf eine Verteilung. Somit wird eine Wahrscheinlichkeitsfunktion der latenten Variablen gelernt. 
# # Der Layer $z$ wird somit ersetzt durch ein Mapping auf eine Verteilung. Durch diese Mapping entsteht eine neue Netzwerkarchitektur, die wie folgt aussieht: # # <img src="./bilder/VAEs.PNG" alt="VAEs" width="600" style="background-color:white;"/> # # ### 3.1. VAE Lernprozess # Durch diese Architektur kรถnnen neue Daten erzeugt werden, in dem latente Samples durch die gelernte Verteilungsparameter erstellt werden. Hier entsteht also ein probabilistischer Ansatz, in dem der VAE zwei Funktionen lernt. # # 1. Encoder lernt: # > $q_\phi(z|x)$: Wahrscheinlichkeitsverteilung von $z$ gegeben dem Input $x$ # # 2. Decoder lernt: # > $p_\theta(x|z)$: Wahrscheinlichkeitsfunktion fรผr $x$ gegeben der Wahrscheinlichkeitsverteilung $z$ # # Die Verlustfunktion mit der dieser Prozess durchgefรผhrt wird betrachtet dabei zwei Komponenten: # # $\mathcal{L}(\phi, \theta, x)= ($reconstruction loss $)+($regularization term$)$ # # Dabei beschreibt der **reconstruction loss**, wie zuvor, eine Verlustfunktion, wie z.B. den MSE, welche mit dem Logarithmus transformiert wird. Die zweite Komponente, der **regularization term**, hingegen ist neu an dieser Stelle. Er beschreibt die Differenz zwischen einer gegebenen Verteilung $q_\phi(z|x)$ und der Normalverteilung $p(z)$. Mit ihm wird sichergestellt, dass die vom Model gelernte Wahrscheinlichkeitsfunktion normalverteilt ist und verhindert, dass ein Overfitting der Verteilung auf Teile des latenten Raumes geschieht. Die Funktion mit der dies umgesetzt wird ist als Kullback-Leibler-Divergenz bekannt: # # <img src="./bilder/Kullback-leibler.PNG" alt="KL" width="600" style="background-color:white;"/> # # ### 3.2. Relevanz der Regularisierung # # Wird eine Regularisierung des Models nicht durchgefรผhrt, so wird die gelernte Verteilung $q_\phi(z|x)$ nicht berรผcksichtigt und es besteht keinerlei Grund diese an bestimmte Verteilungen anzupassen. 
# Daraus entstehen wird keine generalisierte Verteilungsfunktion, sondern lediglich eine Art "clustering". Der Decoder lernt damit keine kontinuierliche Verteilung รผber den latenten Raum, sondern, welche Bereiche des latenten Raumes, einem bestimmten Wert $x$ zugeordnet werden kรถnnen. # # Wird nun durch den Regularisierungsterm durchgesetzt, dass der latente Raum normalverteilt ist, entstehen mehrere positive Eigenschaften: # * Symmetrie um das Zentrum des latenten Raumes # * Clusterung wird bestraft, Generalisierung gefรถrdert # * Kontinuitรคt des latenten Raumes, sprich Punkte, die sich รคhnlich sind, im latenten Raum $z$, dessen Output des decoding Networks, sind ebenfalls รคhnlich # * Vollstรคndigkeit, sprich Samples, generiert durch die Verteilung des latenten Raumes, erzeugen durch die Kontinuitรคt dieses einen sinnvollen Output des Decoders. # # Verdeutlicht wird der Vorteil der Regularisierung durch die folgende Darstellungen. Dabei wird ein zweidimensionaler latenter Raum betrachtet. Er zeigt eine niederdimensionale Reprรคsentation von geometrischen Symbolen. AuรŸerdem dargestellt wird, wie Punte im latenten Raum und in ihrer decodierten Form aussehen. Hierbei wird unterschieden danach, ob eine Regularisierung durchgefรผhrt wurde. # # <img src="./bilder/whyreg.PNG" alt="whyreg" width="600" style="background-color:white;"/> # # Bei ausbleibender Regularisierung kann somit gezeigt werden, dass lediglich ein Clustering des latenten Raumes stattfindet. Punkte die z.T. sehr รคhnlich im latenten Raum sind, haben in ihrer decodierten Form keinerlei Bezug zueinander. Auch ist der latente Raum nicht kontinuierlich und erlaubt keine sinnvolle dekodierung von Punkten aushalb der bereits bekannten Punkte. # # Im Gegenteil dazu zeigen sich die oben definierten Vorteile, sofern eine Regularisierung stattfindet. 
Dadurch wird sichergestellt, dass Samples, erzeugt durch die Verteilung des latenten Raumes, eine sinnvolle bedeutung haben und ein dekodieren neuer Samples mรถglich wird. # # ### 3.3. Reparameterization trick # # Ausgelรถst durch das stochastische Sampling-Layer, in welchem durch die Verteilung $q_\phi(z|x)$ Beispieldaten erzeugt werden, wird die Mรถglichkeit der Backpropagation auf die Layer des Encoders anzuwenden, verhindert, da der Gradient fรผr diese nun nicht mehr bestimmt werden kann. # # Um dieses Problem zu umgehen, wurde der **reparameterization trick** in die VAEs integriert. Dabei handelt es sich um das Auslagern des normalverteilten stochastischen Samplingprozesses in eine Node im Netwerk, die den Vektor $\epsilon$ erzeugt. Dieser Vektor wird multipliziert mit $\sigma$ und $\mu$ dazu addiert $z$. Dadurch verรคndert wird folgendes: # # <img src="./bilder/reparameterization.PNG" alt="Reparameterization" width="550" style="background-color:white;"/> # # # Dabei ermรถglicht die Verรคnderung des Prozesses, dass die Parameter $\mu$ und $\sigma$ nun Gewichte der Aktivierungsfunktion $g$ werden und die Backpropagation verwendet werden kann, um die Verteilungsparameter iterativ zu optimieren. # # 4. Generative Adversarial Networks # # Generative Adversarial Networks (GANs) beschreiben eine weitere Art von generativen Modellen. Mit ihnen kann รผber Random Gaussian Noise als Input ein Transformermodell gelernt werden, dass synthetische Daten erzeugt, dessen Verteilung der von echten Daten รคhnelt. # # Erreicht wird dies, durch zwei Komponenten: # # 1. Generator Network $G$: # > * Lernt aus random gaussian noise $z$ synthetische Daten $X_{fake}$ zu erzeugen. # # 2. Discrimenator Network $D$: # > * Lernt echte Daten $X_{real}$ von den durch den Generator erzeugten Daten $X_{fake}$ zu unterschieden. 
# # Das Zusammenspiel beider Networks wird durch die folgende Abbildung dargestellt: # # <img src="./bilder/GANs.PNG" alt="GANs" width="750" style="background-color:white;"/> # # ### 4.1. Wie lernen GANs? # # Um mittels des generator Networks $G$ synthetische Daten zu erzeugen, die echten Daten รคhneln, muss dieser dessen Verteilung lernen. GleichermaรŸen wird wรคhrend der iterativen Verbesserung der Discriminator $D$ besser darin, die von $G$ erzeugten Daten zu erkennen. Daraus resultierend wird $G$ gezwungen, bessere synthetische Daten zu erzeugen. Der Wettbewerb zwischen $G$ und $D$ fรผhrt somit zu einer iterativen Verbesserung beider Modelle, wobei diese sich fortlaufend an den Fortschritt des jeweils anderen Models anpassen mรผssen. # # Erreicht wird dieses Ziel durch eine geeignete Verlustfunktion $\mathcal{L}$, welche die Backpropagation ermรถglicht. Dabei muss $\mathcal{L}$ zwei Bedingungen erzwingen: # # 1. $G$ erzeugt synthetische Daten $X_{fake}$, dessen Verteilung der von echten Daten gleicht $X_{real}$. # 2. $D$ ist in der Lage $X_{fake}$ von $X_{real}$ zu unterscheiden. # # Umgesetzt werden diese Bedingungen durch die Entropy. Sie beschreibt ein MaรŸ dafรผr, wie gut der Discriminator die synthetischen Daten von den echten Daten unterscheiden kann und ist wie folgt definiert: # # > $\mathbb{E}_{\mathbf{z}, \mathbf{x}}[\log D(G(\mathbf{z}))+\log (1-D(\mathbf{x}))]$ # # Eine genaue Intuition รผber das Verhalten dieser Funktion wird erreicht durch das Betrachten des Logarithmus. # # <img src="./bilder/log.png" alt="log" width="350" style="background-color:white;"/> # # Fรผr diesen gilt, # > $\lim \limits_{x \to 0} log(x) = -\infty$ # > $log(1) = 0$ # # Demnach ergibt sich fรผr die obige Entropy ein negativer unendlicher Wert, wenn entweder $D(X)$ hoch oder $D(G(z))$ gering ist. # Um nun die obigen Bedingungen zu erfรผllen muss daher folgendes erreicht werden: # # 1.
$G$ erzeugt synthetische Daten $X_{fake}$, dessen Verteilung der von echten Daten gleicht $X_{real}$: # > $\arg \min\limits_{G} \mathbb{E}_{\mathbf{z}, \mathbf{x}}[\log D(G(\mathbf{z}))+\log (1-D(\mathbf{x}))]$ # > Es muss also jener Generator erlernt werden, fรผr den ein $D(G(\mathbf{z}))$ nahe 1 vorhergesagt wird. # > GleichermaรŸen sollte der Generator so gut sein, dass der Discriminator $D$ davon ausgeht, dass $D(X)$ nahe 0 ist. # # 2. $D$ ist in der Lage $X_{fake}$ von $X_{real}$ zu unterscheiden. # > $\arg \max\limits_{D} \mathbb{E}_{\mathbf{z}, \mathbf{x}}[\log D(G(\mathbf{z}))+\log (1-D(\mathbf{x}))]$ # > Der Diskriminator sollte versuchen die Entropy zu maximieren # > So wird umgesetzt, dass $D(G(\mathbf{z}))$ mรถglichst nahe 0 ist und $D(\mathbf{x})$ mรถglichst nahe 1, # # Werden die beiden Bedingungen zusammengefรผgt, so ergibt sich die Bedingung: # > $\arg \min\limits_{G} \max\limits_{D} \mathbb{E}_{\mathbf{z}, \mathbf{x}}[\log D(G(\mathbf{z}))+\log (1-D(\mathbf{x}))]$ # # ### 4.2. Erzeugen synthetischer Daten # # Wenn der Lernprozess abgeschlossen ist, hat der Generator $G$ gelernt, Daten zu erzeugen, die echten Daten รคhneln. Deshalb kann er dazu verwendet werden, mit zufรคlligen Inputwerten, neue (synthetische) Daten zu erzeugen.
gans-and-vaes/Summary.ipynb