code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + executionInfo={"elapsed": 3528, "status": "ok", "timestamp": 1603809968590, "user": {"displayName": "<NAME> _", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="TI_MX5jRPda2" import numpy as np import pandas as pd import random from pylab import * import matplotlib.pyplot as plt # import warnings # warnings.filterwarnings('ignore') import scipy.sparse as sparse # - from tqdm import tqdm import pickle # + executionInfo={"elapsed": 18493, "status": "ok", "timestamp": 1603809983620, "user": {"displayName": "<NAME> _", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="bndAJ-UHPdbD" orders = pd.read_csv('orders.csv') order_products_train = pd.read_csv('order_products__train.csv') order_products_prior = pd.read_csv('order_products__prior.csv') # + executionInfo={"elapsed": 18486, "status": "ok", "timestamp": 1603809983626, "user": {"displayName": "<NAME> _", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="Q0ufWXlpPdbF" def get_user_product_prior(orders, order_products_prior): order_user_prior = orders.loc[orders.eval_set == 'prior'] order_user_prior = order_user_prior[['order_id', 'user_id']] df_temp = pd.merge(order_user_prior, order_products_prior[['order_id', 'product_id']], on='order_id') user_product_prior = df_temp[['user_id', 'product_id']] user_product_prior = user_product_prior.groupby(['user_id', 'product_id']).size().reset_index().rename(columns={0: 'quantity'}) return user_product_prior # - def get_user_product_validation(orders, order_products_train): order_user_train = orders.loc[orders.eval_set == 'train'] order_user_train = order_user_train[['order_id', 'user_id']] df_temp = pd.merge(order_user_train, order_products_train[['order_id', 'product_id']], on='order_id') 
user_product_train = df_temp[['user_id', 'product_id']] user_product_train = user_product_train.groupby(['user_id', 'product_id']).size().reset_index().rename(columns={0: 'quantity'}) return user_product_train # + executionInfo={"elapsed": 33320, "status": "ok", "timestamp": 1603809998470, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="pustL9S-PdbI" user_product_prior = get_user_product_prior(orders, order_products_prior) # - user_product_validation = get_user_product_validation(orders, order_products_train) # + colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"elapsed": 33298, "status": "ok", "timestamp": 1603809998477, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="_FW57UbSce8I" outputId="d720c3ef-cac8-4d8d-c70b-8f3328925fff" user_product_prior.nunique() # - user_product_validation.nunique() # + [markdown] id="VKH3jN4qem5u" # ## PMF with SGD # # https://github.com/XiuzeZhou/SGD-PMF/tree/master/SGD_PMF # # - def get_user_item_dict(user_product_prior): all_user_id = user_product_prior.user_id.unique() all_item_id = user_product_prior.product_id.unique() user_dict = {all_user_id[i]:i for i in range(len(all_user_id))} item_dict = {all_item_id[i]:i for i in range(len(all_item_id))} return user_dict, item_dict user_dict, item_dict = get_user_item_dict(user_product_prior) def map_user_product_df(user_product_prior, user_dict, item_dict): user_product_prior.user_id = user_product_prior.user_id.map(user_dict) user_product_prior.product_id = user_product_prior.product_id.map(item_dict) return user_product_prior user_product_prior = map_user_product_df(user_product_prior, user_dict, item_dict) user_product_validation = map_user_product_df(user_product_validation, user_dict, item_dict) user_product_prior user_product_validation user_product_validation = user_product_validation.dropna() user_product_validation.isnull().sum() def 
get_user_product_normalized(user_product_prior): max_quantity = user_product_prior['quantity'].max() user_product_prior['quantity'] = user_product_prior['quantity'].apply(lambda x: (x-1)/(max_quantity-1)) return user_product_prior user_product_prior_normalized = get_user_product_normalized(user_product_prior) user_product_prior_normalized # + executionInfo={"elapsed": 867, "status": "ok", "timestamp": 1603809633564, "user": {"displayName": "<NAME> _", "photoUrl": "", "userId": "13823255111802956482"}, "user_tz": -480} id="-7StKFM1h1y6" def logistic_fn(x): ''' a logistic used to normalize feature vector product ''' return 1/(1 + np.exp(-x)) # - def logistic_fn_dev(x): ''' a logistic derivative ''' return np.exp(x)/((1+np.exp(x))**2) class PMF_V1(): def __init__(self, train_set, # train_set dtype: df validation_set, # validation_set dtype: df N, # number of users: int M, # number of products: int K, # number of latent factors: int eta, # learning rate lbd, # regularization para epoch # number of epoch ): self.train_set = train_set self.validation_set = validation_set self.N = N self.M = M self.K = K self.eta = eta self.lbd = lbd self.epoch = epoch def train(self): np.random.seed(421) ; U = np.random.normal(0, 0.1, (self.N, self.K)) np.random.seed(421) ; V = np.random.normal(0, 0.1, (self.M, self.K)) train_set_np = self.train_set.to_numpy() validation_set_np = self.validation_set.to_numpy() L = 1000.0 RMSE = [] LOSS = [] for step in tqdm(range(self.epoch)): loss = 0.0 for data in train_set_np: u = int(data[0]) i = int(data[1]) r = data[2] e = r - logistic_fn(np.dot(U[u], V[i].T)) U[u] = U[u] + self.eta*(e*logistic_fn_dev(np.dot(U[u], V[i].T))*V[i] - self.lbd*U[u]) V[i] = V[i] + self.eta*(e*logistic_fn_dev(np.dot(U[u], V[i].T))*U[u] - self.lbd*V[i]) loss += 0.5*(e**2 + self.lbd*(np.linalg.norm(U[u]) + np.linalg.norm(V[i]))) LOSS.append(loss) rmse_iter = self.get_rmse_val(validation_set_np, U, V) RMSE.append(rmse_iter) print(LOSS) print(RMSE) if loss < L: break return 
RMSE, LOSS, U, V def get_rmse_val(self, validation_set_np, U, V): rmse = 0.0 for data in validation_set_np: u = int(data[0]) i = int(data[1]) r = data[2] e = r - logistic_fn(np.dot(U[u], V[i].T)) rmse += np.square(e) return np.sqrt(rmse/len(validation_set_np)) def prediction(self, P, Q): N,K = P.shape M,K = Q.shape rating_list=[] for u in range(N): u_rating = np.sum(P[u,:]*Q, axis=1) rating_list.append(u_rating) r_pred = np.array(rating_list) return r_pred # + class PMF_V2(): def __init__(self, train_set, # train_set dtype: df validation_set, # validation_set dtype: df N, # number of users: int M, # number of products: int K, # number of latent factors: int eta, # learning rate lbd, # regularization para epoch # number of epoch ): self.train_set = train_set self.validation_set = validation_set self.N = N self.M = M self.K = K self.eta = eta self.lbd = lbd self.epoch = epoch def train(self): np.random.seed(421) ; U = np.random.normal(0, 0.1, (self.N, self.K)) np.random.seed(421) ; V = np.random.normal(0, 0.1, (self.M, self.K)) train_set_np = self.train_set.to_numpy() validation_set_np = self.validation_set.to_numpy() L = 1000.0 # RMSE = [] LOSS = [] for step in tqdm(range(self.epoch)): loss = 0.0 for data in train_set_np: u = int(data[0]) i = int(data[1]) r = data[2] e = r - logistic_fn(np.dot(U[u], V[i].T)) U[u] = U[u] + self.eta*(e*logistic_fn_dev(np.dot(U[u], V[i].T))*V[i] - self.lbd*U[u]) V[i] = V[i] + self.eta*(e*logistic_fn_dev(np.dot(U[u], V[i].T))*U[u] - self.lbd*V[i]) loss += 0.5*(e**2 + self.lbd*(np.linalg.norm(U[u]) + np.linalg.norm(V[i]))) LOSS.append(loss) # rmse_iter = self.get_rmse_val(validation_set_np, U, V) # RMSE.append(rmse_iter) print(LOSS) # print(RMSE) if loss < L: break return LOSS, U, V # def get_rmse_val(self, validation_set_np, U, V): # rmse = 0.0 # for data in validation_set_np: # u = int(data[0]) # i = int(data[1]) # r = data[2] - 1 # e = r - logistic_fn(np.dot(U[u], V[i].T)) # rmse += np.square(e) # return 
np.sqrt(rmse/len(validation_set_np)) def avg_precision(self, actual, predicted): score = 0.0 num_hits = 0.0 for i, p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if num_hits == 0.0: return 0.0 return score / num_hits def prediction_map(self, U, V): scores = [] for u in tqdm(range(len(U))): user_item_score = np.matmul(U[u], V.T) recomm_items = np.argsort(user_item_score)[-1:-11:-1] gt_product = self.validation_set.query('user_id=={}'.format(u)).product_id.values score = self.avg_precision(gt_product, recomm_items) scores.append(score) if u > 0 and u % 10000 == 0: break return np.mean(scores) # - test_2 = PMF_V2(user_product_prior_normalized, user_product_validation, N = 206209, M = 49677, K = 30, eta = 0.1, lbd = 0.01, epoch = 20) RMSE_2, LOSS_2, U_2, V_2 = test_2.train() MAP_2 = test_2.prediction_map(U_2, V_2) MAP_2 test_3 = PMF_V2(user_product_prior_normalized, user_product_validation, N = 206209, M = 49677, K = 40, eta = 0.1, lbd = 0.01, epoch = 30) RMSE_3, LOSS_3, U_3, V_3 = test_3.train() MAP_3 = test_3.prediction_map(U_3, V_3) MAP_3 test = PMF_V2(user_product_prior_normalized, user_product_validation, N = 206209, M = 49677, K = 20, eta = 0.01, lbd = 0.001, epoch = 30) RMSE, LOSS, U, V = test.train() u_scores = np.matmul(U[88], V.T) sorted_idx = np.argsort(u_scores)[-1:-11:-1] sorted_idx, user_product_validation.query('user_id==88') MAP = test.prediction_map(U, V) MAP user_product_validation.query('product_id==48521') # + # Figure(LOSS, RMSE) # - test_2 = PMF_V2(user_product_prior_normalized, user_product_validation, N = 206209, M = 49677, K = 20, eta = 0.01, lbd = 0.0001, epoch = 30) RMSE, LOSS, U, V = test_2.train() MAP_2 = test_2.prediction_map(U, V) MAP_2 loss_k_30_eta_0_1_lbd_0_001 = [1018992.5404728408, 335756.3361676689, 154343.78715548394, 105592.94940664852, 83709.69361526785, 71557.49132956797, 63942.840274595495, 58779.93868424015, 55078.263111409484, 52310.99349509732, 
50173.880130509526, 48479.81999174722, 47107.901787709896, 45976.786581528875, 45029.90803985403, 44226.81254629806, 43537.87062395146, 42940.9279419077, 42419.11746503053, 41959.39095230122, 41551.509671836546, 41187.336132534896, 40860.327779091276, 40565.16899760188, 40297.499589870866, 40053.71165656828, 39830.79573194091, 39626.22287352075, 39437.85333848688, 39263.86515292958] loss_k_40_eta_0_1_lbd_0_001 = [1004186.8066271984, 328263.528151722, 152975.4902470725, 105141.17249560913, 83557.68554420622, 71538.78615480174, 63994.1193839823, 58871.905120454925, 55195.45099613362, 52444.42724175973, 50317.97206809847, 48630.9025867458, 47263.46890561089, 46135.07209568307, 45189.63759752207, 44387.05002173118, 43697.91917697989, 43100.26403029647, 42577.345540639784, 42116.211577382914, 41706.69658464001, 41340.71942873341, 41011.781370245946, 40714.60112567691, 40444.84556524107, 40198.92823922397, 39973.85674285425, 39767.11573816922, 39576.576344343135, 39400.425259297015] loss_k_40_eta_0_1_lbd_0_01 = [1430603.5435018882, 750964.6266716269, 444291.3130135707, 361831.1897208446, 327372.11735735374, 308995.4587775118, 297800.1098603388, 290385.59432200453, 285184.5574546951, 281379.30183456006, 278503.9519000853, 276274.9677188406, 274510.6815135321, 273089.8030955978, 271928.57369761146, 270967.4999135267, 270163.3028004697, 269483.84655698907, 268904.8407116806, 268407.63574607583, 267977.7137686066, 267603.6329847235, 267276.27553181327, 266988.30242146994, 266733.7525586916, 266507.7437028977, 266306.2466705807, 266125.9129010787, 265963.94139938825, 265817.9750784928] loss_k_30_eta_0_2_lbd_0_001 = [571492.8034147689, 131028.7930000466, 76233.56373152077, 60742.8050131463, 53380.06202145347, 49152.71968045616, 46442.20888083213, 44570.97535696634, 43208.39982226448, 42175.34611194008, 41366.98819964953, 40718.22167844158, 40186.63456668066, 39743.492307305576, 39368.677475406614, 39047.70752204178, 38769.90343361721, 38527.22490886872, 38313.50657430421, 
38123.943640946396, 37954.73729397088, 37802.84504055136, 37665.80162624364, 37541.58838594855, 37428.53646143256, 37325.25410246228, 37230.57136148613, 37143.497529151595, 37063.18802550676, 36988.918392239655] x = range(len(loss_k_30_eta_0_1_lbd_0_001)) plt.plot(x, loss_k_30_eta_0_1_lbd_0_001, 'r--', x, loss_k_40_eta_0_1_lbd_0_001, 'bs', x, loss_k_40_eta_0_1_lbd_0_01, 'g^', x, loss_k_30_eta_0_2_lbd_0_001, 'y') plt.title('Convergence curve') plt.xlabel('Iterations') plt.ylabel('Loss') plt.legend(['K=30, eta=0.1, lbd=0.001', 'K=40, eta=0.1, lbd=0.001', 'K=40, eta=0.1, lbd=0.01', 'K=30, eta=0.2, lbd=0.001']) plt.show()
Model 2.2 - PMF_SGD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # standard. import pandas as pd from pandas import Series, DataFrame # + # reading the data from csv file titanic_df = pd.read_csv('train.csv') # preview of data titanic_df.head() # + # quick grab of data titanic_df.info() # - # ## Questions # # All good data analysis projects begin with trying to answer questions. Now that we know what column category data we have let's think of some questions or insights we would like to obtain from the data. So here's a list of questions we'll try to answer using our new data analysis skills! # # First some basic questions: # # 1.) Who were the passengers on the Titanic? (Ages,Gender,Class,..etc) # # 2.) What deck were the passengers on and how does that relate to their class? # # 3.) Where did the passengers come from? # # 4.) Who was alone and who was with family? # # # # Then we'll dig deeper, with a broader question: # # 5.) What factors helped someone survive the sinking? # # # # So let's start with the first question: Who were the passengers on the titanic? # + # plotting library import matplotlib.pyplot as plt import seaborn as sns import numpy as np # %matplotlib inline # + # quick look at the sex of people on the titanic """ we will use factor plot for this which takes a coloum name and divide on the basis of the avaliable data. """ sns.factorplot('Sex', data=titanic_df) # + # Male and female in each class sns.factorplot('Sex', data=titanic_df, hue='Pclass') # + # better way sns.factorplot('Pclass', data=titanic_df, hue='Sex') # - # Wow, quite a few more males in the 3rd class than females, an interesting find. However, it might be useful to know the split between males,females,and children. How can we go about this? 
# + # We'll treat anyone as under 16 as a child, and then use the apply technique with a function to create a new column def male_female_child(passenger): age, sex = passenger if age < 16: return 'child' else: return sex # creating a passenger coloumn in the titanic_df # since it is a coloumn and not index we need to set axis to 1 titanic_df['Person'] = titanic_df[['Age', 'Sex']].apply(male_female_child, axis=1) # - titanic_df[0:10] # + # factor plot for person sns.factorplot("Pclass", data=titanic_df, hue="Person") # + # hist plot of ages titanic_df['Age'].hist(bins=70) # + # find the mean age titanic_df['Age'].mean() # - titanic_df.info() # + # removinf a coloumn from data frame titanic_df.drop('person', axis=1, inplace=True) # - titanic_df.head() # + # to get details of each individual sex in person titanic_df['Person'].value_counts() # + # Another way to visualize the data is to use FacetGrid to plot multiple kedplots on one plot # Set the figure equal to a facetgrid with the pandas # dataframe as its data source, set the hue, and change the aspect ratio. 
fig = sns.FacetGrid(titanic_df, hue='Sex', aspect=4) # Next use map to plot all the possible kdeplots for the 'Age' column by the hue choice fig.map(sns.kdeplot, 'Age', shade=True) oldest = titanic_df['Age'].max() fig.set(xlim=(1, oldest)) fig.add_legend() # + # similary plotting for person fig = sns.FacetGrid(titanic_df, hue='Person', aspect=4) # Next use map to plot all the possible kdeplots for the 'Age' column by the hue choice fig.map(sns.kdeplot, 'Age', shade=True) oldest = titanic_df['Age'].max() fig.set(xlim=(1, oldest)) fig.add_legend() # + # similary plotting for class fig = sns.FacetGrid(titanic_df, hue='Pclass', aspect=4) # Next use map to plot all the possible kdeplots for the 'Age' column by the hue choice fig.map(sns.kdeplot, 'Age', shade=True) oldest = titanic_df['Age'].max() fig.set(xlim=(1, oldest)) fig.add_legend() # - titanic_df.head() # + # First we'll drop the NaN values and create a new object, deck deck = titanic_df['Cabin'].dropna() deck.head() # + # We need only first letter in the deck not we will remove the rest of the data levels = [] # grabbing the First letter for i in deck: levels.append(i[0]) # Make a cabin dataFrame cabin_df = DataFrame(levels) cabin_df.columns = ['Cabins'] sns.factorplot('Cabins', data=cabin_df, palette='winter_d') # + # Redefine cabin_df as everything but where the row was equal to 'T' cabin_df = cabin_df[cabin_df.Cabins != 'T'] sns.factorplot('Cabins', data=cabin_df, palette='summer') # - # # Now that we've analyzed the distribution by decks, let's go ahead and answer our third question: # # 3.) Where did the passengers come from? # + # Factor plot of where people came from. ''' plot tells how many people came from which places and which class ''' # by using x_order we can remove the nan plots sns.factorplot('Embarked', data=titanic_df, x_order=['C', 'Q', 'S'], hue='Pclass', aspect=2) # - # # An interesting find here is that in Queenstown, almost all the passengers that boarded there were 3rd class. 
It would be intersting to look at the economics of that town in that time period for further investigation. # # Now let's take a look at the 4th question: # # 4.) Who was alone and who was with family? # + # Adding alone column titanic_df['Alone'] = titanic_df['SibSp'] + titanic_df['Parch'] titanic_df['Alone'].loc[titanic_df['Alone'] > 0] = 'With family' titanic_df['Alone'].loc[titanic_df['Alone'] == 0] = 'Alone' # - titanic_df[0:10] # + # plotting sns.factorplot('Alone', data=titanic_df, palette='Blues', hue='Sex') # - # Now that we've throughly analyzed the data let's go ahead and take a look at the most interesting (and open-ended) question: What factors helped someone survive the sinking? # + # Making a survivor column using surivived values titanic_df['survivor'] = titanic_df.Survived.map({0: 'No', 1:"Yes"}) titanic_df[0:10] # + # survival of men and women sns.factorplot('survivor', data=titanic_df, palette='Reds', hue='Sex') # + # survival based on class #sns.factorplot('survivor', data=titanic_df, palette='Reds', hue='Pclass') sns.factorplot('Pclass', 'Survived', data=titanic_df) # - sns.factorplot('Survived', data=titanic_df, palette='Reds', hue='Sex') # + # unfavouriable conditions for survival. sns.factorplot('Pclass', 'Survived', data=titanic_df, hue='Person') # - # # From this data it looks like being a male or being in 3rd class were both not favourable for survival. Even regardless of class the result of being a male in any class dramatically decreases your chances of survival. # # But what about age? Did being younger or older have an effect on survival rate? sns.lmplot('Age', 'Survived', data=titanic_df) # # Looks like there is a general trend that the older the passenger was, the less likely they survived. Let's go ahead and use hue to take a look at the effect of class and age. 
# Let's use a linear plot on age versus survival using hue for class seperation sns.lmplot('Age','Survived',hue='Pclass',data=titanic_df,palette='winter') # # We can also use the x_bin argument to clean up this figure and grab the data and bin it by age with a std attached! # + # cleaning up the plot generations = [10, 20, 40, 60, 80] sns.lmplot('Age','Survived',hue='Pclass',data=titanic_df,palette='winter', x_bins=generations) # - # What about if we relate gender and age with the survival set? sns.lmplot('Age','Survived',hue='Sex',data=titanic_df,palette='winter', x_bins=generations) # 1.) Did the deck have an effect on the passengers survival rate? Did this answer match up with your intuition? # + # concatinating the data frames deck = [] for i in titanic_df['Cabin']: if str(i) == 'NaN': deck.append(0) else: deck.append(str(i)[0]) titanic_df['Deck'] = deck sns.factorplot('Deck', x_order=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'T'], data=titanic_df, hue='Survived') # - # 2.) Did having a family member increase the odds of surviving the crash? sns.factorplot('Survived', data=titanic_df, hue='Alone') titanic_df['Alone'].value_counts() from IPython.display import Image Image(url='http://i.imgur.com/DGNjT.gif')
Titanic Intro project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark3 # name: pyspark3kernel # --- # # Read and write from Spark to SQL using the MSSQL jdbc Connector # A typical big data scenario a key usage pattern is high volume, velocity and variety data processing in Spark followed with batch/streaming writes to SQL for access to LOB applications. These usage patterns greatly benefit from a connector that utilizes key SQL optimizations and provides an efficient and reliable write to SQLServer Big Data Cluster or SQL DB. # # MSSQL JDBC connector, referenced by the name com.microsoft.sqlserver.jdbc.spark, uses [SQL Server Bulk copy APIS](https://docs.microsoft.com/en-us/sql/connect/jdbc/using-bulk-copy-with-the-jdbc-driver?view=sql-server-2017#sqlserverbulkcopyoptions) to implement an efficient write to SQL Server. The connector is based on Spark Data source APIs and provides a familiar JDBC interface for access. # # The following sample shows how to use the MSSQL JDBC Connector for writing and reading to/from a SQL Source. In this sample we' ll # - Read a file from HDFS and do some basic processing # - post that we'll write the dataframe to SQL server table using the MSSQL Connector. # - Followed by the write we'll read using the MSSQLConnector. # # PreReq : # - The sample uses a SQL database named "MyTestDatabase". Create this before you run this sample. The database can be created as follows # ``` sql # Create DATABASE MyTestDatabase # GO # ``` # - Download [AdultCensusIncome.csv]( https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv ) to your local machine. Create a hdfs folder named spark_data and upload the file there. # - Configure the spark session to use the MSSQL Connector jar. The jar can be found at /jar/spark-mssql-connector-assembly-1.0.0.jar post deployment of Big Data Cluster. 
# # ``` sh # # %%configure -f # {"conf": {"spark.jars": "/jar/spark-mssql-connector-assembly-1.0.0.jar"}} # ``` # # # # # Configure the notebook to use the MSSQL Spark connector # This step woould be removed in subsequent CTPs. As of CTP2.5 this step is required to point the spark session to the relevant jar. # # + # %%configure -f {"conf": {"spark.jars": "/jar/spark-mssql-connector-assembly-1.0.0.jar"}} # - # # Read data into a data frame # In this step we read the data into a data frame and do some basic clearup steps. # # #Read a file and then write it to the SQL table datafile = "/spark_data/AdultCensusIncome.csv" df = spark.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile) df.show(5) # + #Process this data. Very simple data cleanup steps. Replacing "-" with "_" in column names columns_new = [col.replace("-", "_") for col in df.columns] df = df.toDF(*columns_new) df.show(5) # - # # Write dataframe to SQL using MSSQL Spark Connector # + #Write from Spark to SQL table using MSSQL Spark Connector print("Use MSSQL connector to write to master SQL instance ") servername = "jdbc:sqlserver://master-0.master-svc" dbname = "MyTestDatabase" url = servername + ";" + "databaseName=" + dbname + ";" dbtable = "dbo.AdultCensus" user = "sa" password = "****" # Please specify password here try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("overwrite") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("user", user) \ .option("password", password)\ .save() except ValueError as error : print("MSSQL Connector write failed", error) print("MSSQL Connector write succeeded ") # - # # Read SQL Table using MSSQL Spark connector. # The following code uses the connetor to read the tables. 
To confirm the write about check table directly using SQL # + #Read from SQL table using MSSQ Connector print("read data from SQL server table ") jdbcDF = spark.read \ .format("com.microsoft.sqlserver.jdbc.spark") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("user", user) \ .option("password", password) \ .load() jdbcDF.show(5)
samples/features/sql-big-data-cluster/spark/spark_to_sql/mssql_spark_connector.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example Analysis - Pro-/Anti-Reach Task # This Python code showcases some of the data stored in the JSON experiment data format, as well as common analyses like extracting trial results, movement sample data, and participant metadata. # + # %matplotlib notebook import os import glob import json import numpy as np import pandas as pd import matplotlib.pyplot as plt from palettable.colorbrewer.qualitative import Set1_9 plt.style.use('seaborn-whitegrid') # + folder_data = '../data' CONDS = ['left_pro', 'right_pro', 'left_anti', 'right_anti'] CONDS_TEXT = ['Left / Pro', 'Right / Pro', 'Left / Anti', 'Right / Anti'] # Participants to show in example figures EXAMPLE_PPID = 2 GAZE_EXAMPLE_PPID = 1 # - # ## Data Import and Preprocessing # This section extracts individual datasets from the JSON files and combines them for analysis. 
Afterwards, we end up with the following data structures: # # - *data*: dict of all raw JSON datasets, not analyzed further here # - *trials*: pandas DataFrame of behavioral results, one row per trial # - *samples*: list of DataFrames with movement and gaze data, one per trial, matched to *trials* # - *metadata*: DataFrame of demographic metadata, one row per participant # - *gaze_acc*: DataFrame of average eye tracking quality, one row per participant # - *gaze_targets*: dict of DataFrames for each calibration target (5 targets), structure like *gaze_acc* # + # Import all experiment data from the JSON file data = {} metadata = [] gaze_acc = [] gaze_targets = {} trials = [] samples = [] data_files = glob.glob(os.path.join(folder_data, '*.json')) for df in data_files: with open(df, 'r') as dfh: expdata = json.load(dfh) # Contents of the expdata dict imported from JSON are: # dict_keys(['eye_tracker_validations', 'participant', 'config', 'name', 'trials']) # Copy the demographic data entered at experiment start metadata.append(expdata['participant'].copy()) ppid = int(expdata['participant']['id']) # Extract the averaged gaze validation data metrics = expdata['eye_tracker_validations'][0].copy() gaze_targets[ppid] = pd.DataFrame(metrics['targets']) del metrics['targets'] del metrics['samples'] del metrics['metadata'] metrics['ppid'] = ppid gaze_acc.append(metrics) # Target / eye height and distance were saved to the config structure, # as they were the same for all trials eyeheight = expdata['config']['eyeheight'] tar_dist = expdata['config']['tar_dist'] # Trial data # This can also be saved and imported as a CSV text file for trial in expdata['trials']: # Combine trial parametrs and results into one dict tdata = trial['params'].copy() tdata.update(trial['results']) # Add target information to the trial table for easier analysis tdata['ppid'] = ppid tdata['trial_index'] = trial['index'] tdata['tar_y'] = eyeheight tdata['tar_z'] = tar_dist # Define a condition label 
for analysis if tdata['pro'] == 1 and tdata['target'] == 'left': tdata['condition'] = 'left_pro' tdata['tar_x_eff'] = -0.3 elif tdata['pro'] == 0 and tdata['target'] == 'left': tdata['condition'] = 'left_anti' tdata['tar_x_eff'] = 0.3 elif tdata['pro'] == 1 and tdata['target'] == 'right': tdata['condition'] = 'right_pro' tdata['tar_x_eff'] = 0.3 elif tdata['pro'] == 0 and tdata['target'] == 'right': tdata['condition'] = 'right_anti' tdata['tar_x_eff'] = -0.3 trials.append(tdata) # Extract movement sample data sam = pd.DataFrame(trial['samples']) samples.append(sam) # Main data dict data[ppid] = expdata metadata = pd.DataFrame(metadata) metadata.loc[:, 'age'] = pd.to_numeric(metadata.loc[:, 'age']) # Metadata are strings by default gaze_acc = pd.DataFrame(gaze_acc) trials = pd.DataFrame(trials) # Calculate horizontal endpoint error to cued target location trials.loc[:, 'endpoint_error'] = np.abs(trials.hit_x - trials.tar_x_eff) display(trials.head()) # - # The above table shows the structure of per-trial behaviroal data for the first five trials in the dataset. # # The next two cells extract the hand movement trajectories for each trial, align them with the trial start (time when the participant fixated on the target sphere), then group and average trajectories for each of the four movement conditions (pro/left, pro/right, anti/left, anti/right). 
# + # Calculate movement onsets for each trial based on velocity ONSET_VEL = 0.003 movements = [] # Separate trajectories by condition proL = [] antiL = [] proR = [] antiR = [] max_len = 0 for trial in trials.iterrows(): idx = trial[0] t = trial[1] s = samples[idx].copy() # Calculate velocity per axis by numerical differentiation # For this simple analysis, we are ignoring time stamp variability s.loc[:, 'vX'] = s.controller_posX.diff() s.loc[:, 'vY'] = s.controller_posY.diff() s.loc[:, 'vZ'] = s.controller_posZ.diff() s.loc[:, 'is_onset'] = 0 s.loc[:, 'is_offset'] = 0 # Add time column relative to go cue time s.loc[:, 'time_rel'] = s.time - t.go_time samples[idx].loc[:, 'time_rel'] = s.loc[:, 'time_rel'] # Skip beginning of the trial # Note that fix_onset_time means the onset of the *participant's* fixation on the target s = s.loc[s.time >= (t.fix_onset_time), :] # Remove all samples with negative velocities (return movement) s = s.loc[s.vZ > 0, :] # Find first timestamp where Z (forward) velocity exceeds threshold onset_idx = np.argmax(s.vZ.values >= ONSET_VEL) onset = s.time.values[onset_idx] latency = onset - t.go_time s.loc[s.index[onset_idx], 'is_onset'] = 1 # We use the reach time (vizproximity sensor) for movement end for this example # Note that this indicates the time the hand crossed the distance threshold, # but actual reach endpoints might be a bit further offset = t.reach_time duration = offset - onset # Add movement timing data to trial table for timing analysis trials.loc[idx, 'move_onset'] = onset trials.loc[idx, 'move_offset'] = offset trials.loc[idx, 'move_latency'] = latency trials.loc[idx, 'move_duration'] = duration # Sort sample participant traces by condition if t.ppid == EXAMPLE_PPID: trace = samples[idx].loc[(samples[idx].time >= t.go_time) & (samples[idx].time <= offset), ('controller_posX', 'controller_posY', 'controller_posZ')].values if t.target == 'left' and t.pro == 1: proL.append(trace) elif t.target == 'left' and t.pro == 0: 
antiL.append(trace) elif t.target == 'right' and t.pro == 1: proR.append(trace) elif t.target == 'right' and t.pro == 0: antiR.append(trace) if trace.shape[0] > max_len: max_len = trace.shape[0] if s.shape[0] > 0: s.loc[:, 'move_time'] = s.time - s.time.values[0] movements.append(s) print(trials.shape, len(movements)) # + # Calculate average trajectories for pro/anti reaches # Movements have different lengths, so we align with onset # and average positions while dropping NaNs pro_left = np.ones((max_len, 3, len(proL))) * np.nan anti_left = np.ones((max_len, 3, len(antiL))) * np.nan pro_right = np.ones((max_len, 3, len(proR))) * np.nan anti_right = np.ones((max_len, 3, len(antiR))) * np.nan for ix, tr in enumerate(proL): pro_left[0:tr.shape[0], :, ix] = tr pro_left = np.nanmedian(pro_left, 2) for ix, tr in enumerate(proR): pro_right[0:tr.shape[0], :, ix] = tr pro_right = np.nanmedian(pro_right, 2) for ix, tr in enumerate(antiL): anti_left[0:tr.shape[0], :, ix] = tr anti_left = np.nanmedian(anti_left, 2) for ix, tr in enumerate(antiR): anti_right[0:tr.shape[0], :, ix] = tr anti_right = np.nanmedian(anti_right, 2) # - # Now perform outlier correction on the movement parameters, i.e. remove trials with unsually short or long movement onset latency or duration (i.e., +/- 3 standard deviations). 
# + # Outlier correction for movement latency and movement duration SDS = 3.0 trials.loc[:, 'valid'] = 1 # Movement latency lat_m = trials.loc[trials.valid == 1, :].move_latency.mean() lat_sd = trials.loc[trials.valid == 1, :].move_latency.std() trials.loc[(trials.move_latency <= lat_m - (SDS * lat_sd)) | (trials.move_latency >= lat_m +( SDS * lat_sd)), 'valid'] = 0 # Movement duration dur_m = trials.loc[trials.valid == 1, :].move_duration.mean() dur_sd = trials.loc[trials.valid == 1, :].move_duration.std() trials.loc[(trials.move_duration <= dur_m - (SDS * dur_sd)) | (trials.move_duration >= dur_m + (SDS * dur_sd)), 'valid'] = 0 invalid = trials.shape[0] - trials.valid.sum() valid_percent = (trials.valid.sum() / trials.shape[0]) * 100 print('{:d} trials removed, {:.2f}% valid trials'.format(invalid, valid_percent)) display(trials.groupby('ppid').sum().valid.to_frame().T) trials_valid = trials.loc[trials.valid == 1, :].copy() print(trials_valid.shape) # - # ## Demographics and Eye Tracking Accuracy # Here we show typical demographic informations that might go into a *Participants* section in a paper. We also calculate group average, minimum and maximum eye tracking accuracy in degrees and plot individual accuracy data for each tested calibration target below. 
# Extract demographic information on participants display(metadata.loc[:, 'age'].describe().to_frame().T) display(metadata.loc[:, 'gender'].value_counts().to_frame()) # Descriptives on eye tracking validation data display(gaze_acc.loc[:, 'acc'].describe().to_frame().T) # + # Gaze accuracy plot fig, ax = plt.subplots(1, 1, figsize=(3.25, 3.25), dpi=90) labels = [] handles = [] # Plot calibration target positions in degrees ax.axhline(0, linestyle=':', linewidth=0.7, color=[0.8, 0.8, 0.8], zorder=1) ax.axvline(0, linestyle=':', linewidth=0.7, color=[0.8, 0.8, 0.8], zorder=1) ax.errorbar(x=gaze_targets[1].x, y=gaze_targets[1].y, xerr=0.25, yerr=0.25, marker=None, markersize=4, linewidth=0, elinewidth=1.5, color='k', zorder=10) # Plot individual results per target for ppid in gaze_targets.keys(): tar_df = gaze_targets[ppid] h = ax.errorbar(x=tar_df.avgX, y=tar_df.avgY, xerr=tar_df.sdX, yerr=tar_df.sdY, marker='o', solid_joinstyle='round', markersize=4, linewidth=0, elinewidth=1.0, color=Set1_9.mpl_colors[ppid-1], markeredgecolor="None") handles.append(h) # Add average accuracy to legend acc = gaze_acc.loc[gaze_acc.ppid == ppid, 'acc'].values[0] labels.append('{:d} ({:.2f}°)'.format(ppid, acc)) ax.set_xlabel('Horizontal gaze angle (°)', fontsize=12) ax.set_ylabel('Vertical gaze angle (°)', fontsize=12) ax.set_xticks([-6, -4, -2, 0, 2, 4, 6]) ax.set_yticks([-6, -4, -2, 0, 2, 4, 6]) ax.tick_params(axis='both', which='major', labelsize=10) legend = ax.legend(handles, labels, title='Participant', frameon=False, fontsize=8, loc='upper right') legend._legend_box.align='left' ax.grid(False, which='major', axis='x') ax.grid(False, which='major', axis='y') for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(1.0) ax.spines[axis].set_color('k') # Save figure plt.tight_layout() plt.subplots_adjust(wspace=0.2) plt.savefig('fig_gaze_acc.png', bbox_inches='tight', dpi=150) # - # ## Per-Trial Behavioral Data # These cells give an example of behavioral data 
# recorded as one data point per trial, such as the timing of each movement, distance between hand and target when the distance threshold was crossed, and whether the response (movement direction) was correct with regard to the color cue or not.

# Percent correct responses (left / right hemifield)
per_corr = trials_valid.groupby('condition').agg({'correct': 'sum', 'ppid': 'count'})
per_corr.loc[:, 'percent_correct'] = (per_corr.correct / per_corr.ppid) * 100
display(per_corr)

# +
# Plot movement timing parameters: one subplot each for onset latency,
# duration, and horizontal endpoint error, with individual participants
# dodged left and condition means (+/- SE) dodged right of each x position.
DODGE = 0.05

fig = plt.figure(figsize=(4, 5), dpi=90)
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)

for cidx, cond in enumerate(CONDS):
    # Marker style encodes pro vs. anti reaches
    if cond.split('_')[1] == 'pro':
        marker = 'o'
        mcolor = 'k'
    else:
        marker='s'
        mcolor = 'gray'

    # Individual participant data (one mean per participant and condition)
    indiv = trials_valid.loc[(trials_valid.condition == cond), :].groupby('ppid').mean().reset_index()

    # Onset Latency
    for ppid in indiv.ppid.values:
        ax1.plot(cidx - DODGE, indiv.loc[indiv.ppid == ppid, 'move_latency'], color=Set1_9.mpl_colors[ppid-1],
                 marker=marker, markersize=4, alpha=0.6)

    # Duration
    for ppid in indiv.ppid.values:
        ax2.plot(cidx - DODGE, indiv.loc[indiv.ppid == ppid, 'move_duration'], color=Set1_9.mpl_colors[ppid-1],
                 marker=marker, markersize=4, alpha=0.6)

    # Endpoint error (converted from m to cm via * 100)
    for ppid in indiv.ppid.values:
        ax3.plot(cidx - DODGE, indiv.loc[indiv.ppid == ppid, 'endpoint_error'] * 100, color=Set1_9.mpl_colors[ppid-1],
                 marker=marker, markersize=4, alpha=0.6)

    # Averages + SE
    lat_avg = indiv.move_latency.mean()
    lat_avg_se = indiv.move_latency.std() / np.sqrt(indiv.shape[0])
    dur_avg = indiv.move_duration.mean()
    dur_avg_se = indiv.move_duration.std() / np.sqrt(indiv.shape[0])
    err_avg = indiv.endpoint_error.mean()
    err_avg_se = indiv.endpoint_error.std() / np.sqrt(indiv.shape[0])
    ax1.errorbar(x=cidx + DODGE, y=lat_avg, yerr=lat_avg_se, color=mcolor,
                 marker=marker, markersize=6, alpha=1.0)
    ax2.errorbar(x=cidx + DODGE, y=dur_avg, yerr=dur_avg_se, color=mcolor,
                 marker=marker, markersize=6, alpha=1.0)
    ax3.errorbar(x=cidx + DODGE, y=err_avg * 100, yerr=err_avg_se * 100, color=mcolor,
                 marker=marker, markersize=6, alpha=1.0)

ax1.set_ylabel('Latency (ms)', fontsize=12)
ax1.set_xticklabels([])
ax1.set_ylim([-600, 800])
ax2.set_ylabel('Duration (ms)', fontsize=12)
ax2.set_ylim([0, 1200])
ax2.set_xticklabels([])
ax3.set_ylabel('Hor. Error (cm)', fontsize=12)
ax3.set_xlabel('Condition', fontsize=12)
ax3.set_xticks(range(0, len(CONDS)))
ax3.set_xticklabels(['Left /\nPro', 'Right /\nPro', 'Left /\nAnti', 'Right /\nAnti'])
ax3.xaxis.set_tick_params(pad=8)
ax3.set_ylim([0, 7])

# Shared axis cosmetics
for ax in [ax1, ax2, ax3]:
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(1.0)
        ax.spines[axis].set_color('k')
    ax.grid(False, which='major', axis='x')

# Save figure
fig.align_ylabels()
plt.tight_layout()
plt.subplots_adjust(hspace=0.15)
plt.savefig('fig_behavioral.png', bbox_inches='tight', dpi=150)
# -

# ## Continuous Behavioral Data
# Finally, we plot some examples of continuous behavioral data, recorded at 90 Hz (once per VR HMD display frame).
# +
# Controller movement data: individual-trial hand trajectories of the
# example participant (thin traces) overlaid with condition-median
# trajectories (thick traces) computed earlier.
TRACE_WIDTH = 0.8
TRACE_ALPHA = 0.2
AVG_WIDTH = 1.5
AVG_ALPHA = 0.9
MAX_AVG_SAMPLES = 68 # average for ~750 ms

fig, ax = plt.subplots(2, 1, figsize=(3.25, 5.45), dpi=90)

# Grab data from sample participant
tr_index = trials_valid.loc[trials_valid.ppid == EXAMPLE_PPID, :].index.values
for ix, s in enumerate(samples):
    if ix in tr_index:
        t = trials_valid.loc[ix, :]

        # Set plot style and collect traces
        # (linestyle encodes target side, color encodes pro/anti)
        if t.target == 'left':
            ls = '-'
        elif t.target == 'right':
            ls = ':'
        if t.pro == 1:
            color = Set1_9.mpl_colors[1]
        elif t.pro == 0:
            color = Set1_9.mpl_colors[0]

        # Plot individual trial trajectories
        ax[0].plot(s.time_rel, s.controller_posX, linestyle=ls, color=color, linewidth=TRACE_WIDTH, alpha=TRACE_ALPHA)
        ax[1].plot(s.time_rel, s.controller_posY, linestyle=ls, color=color, linewidth=TRACE_WIDTH, alpha=TRACE_ALPHA)

# Plot average trajectories
# (the 11.1111 ms factor converts sample index to time at a 90 Hz frame rate)
for coord in [0, 1]:
    h1, = ax[coord].plot((np.arange(0, MAX_AVG_SAMPLES) * 11.1111), pro_left[:MAX_AVG_SAMPLES, coord], linestyle='-',
                         color=Set1_9.mpl_colors[1], linewidth=AVG_WIDTH, alpha=AVG_ALPHA)
    h2, = ax[coord].plot((np.arange(0, MAX_AVG_SAMPLES) * 11.1111), pro_right[:MAX_AVG_SAMPLES, coord], linestyle=':',
                         color=Set1_9.mpl_colors[1], linewidth=AVG_WIDTH, alpha=AVG_ALPHA)
    h3, = ax[coord].plot((np.arange(0, MAX_AVG_SAMPLES) * 11.1111), anti_left[:MAX_AVG_SAMPLES, coord], linestyle='-',
                         color=Set1_9.mpl_colors[0], linewidth=AVG_WIDTH, alpha=AVG_ALPHA)
    h4, = ax[coord].plot((np.arange(0, MAX_AVG_SAMPLES) * 11.1111), anti_right[:MAX_AVG_SAMPLES, coord], linestyle=':',
                         color=Set1_9.mpl_colors[0], linewidth=AVG_WIDTH, alpha=AVG_ALPHA)

# Legend
handles = [h1, h2, h3, h4]
labels = CONDS_TEXT
legend = ax[1].legend(handles, labels, title='Condition', frameon=False, fontsize=8, loc='lower right')
legend._legend_box.align='left'

# General settings for all subplots
ax[0].set_ylabel('Horizontal Hand Position (m)', fontsize=12)
ax[1].set_ylabel('Vertical Hand Position (m)', fontsize=12)
ax[0].set_xticks([])
ax[1].set_xlabel('Time After Go Cue (ms)', fontsize=12)

# General options for all subplots
for axx in ax:
    axx.set_xlim([-250, 1000])
    axx.axvline(0, linestyle=':', linewidth=1.0, color='k', zorder=1)
    axx.grid(False, which='major', axis='x')
    axx.grid(False, which='major', axis='y')
    for axis in ['top','bottom','left','right']:
        axx.spines[axis].set_linewidth(1.0)
        axx.spines[axis].set_color('k')

# Save figure
fig.align_labels()
plt.tight_layout()
plt.subplots_adjust(hspace=0.05)
plt.savefig('fig_controller.png', bbox_inches='tight', dpi=150)

# +
# Example gaze data: per-trial 3D gaze position traces for one participant,
# styled by condition as above.
TRACE_WIDTH = 1.5
TRACE_ALPHA = 0.6

fig, ax = plt.subplots(2, 1, figsize=(3.25, 5.45), dpi=90)

# Grab data from sample participant
tr_index = trials_valid.loc[trials_valid.ppid == GAZE_EXAMPLE_PPID, :].index.values

# Sample Data (Subplot 1 & 2)
for ix, s in enumerate(samples):
    if ix in tr_index:
        t = trials_valid.loc[ix, :]
        # Window the trace around the go cue (time_rel closest to 0)
        onset_idx = s.time_rel.abs().argmin()
        trace_min = s.time_rel[onset_idx] - 250
        trace_max = s.time_rel[onset_idx] + 1000
        trace = s.loc[(s.time_rel >= trace_min) & (s.time_rel <= trace_max), :]

        # Set plot style by condition
        if t.target == 'left':
            ls = '-'
            if t.pro == 1:
                color = Set1_9.mpl_colors[1]
            elif t.pro == 0:
                color = Set1_9.mpl_colors[0]
        elif t.target == 'right':
            ls = ':'
            if t.pro == 1:
                color = Set1_9.mpl_colors[1]
            elif t.pro == 0:
                color = Set1_9.mpl_colors[0]

        h, = ax[0].plot(s.time_rel, s.gaze3d_posX, linestyle=ls, color=color, linewidth=TRACE_WIDTH, alpha=TRACE_ALPHA)
        ax[1].plot(s.time_rel, s.gaze3d_posY, linestyle=ls, color=color, linewidth=TRACE_WIDTH, alpha=TRACE_ALPHA)

ax[0].set_ylabel('Horizontal Gaze Position (m)', fontsize=12)
ax[0].set_xlim([-250, 1000])
ax[0].set_ylim([-0.35, 0.35])
ax[0].set_xticks([])
ax[1].set_ylabel('Vertical Gaze Position (m)', fontsize=12)
ax[1].set_xlabel('Time After Go Cue (ms)', fontsize=12)
ax[1].set_xlim([-250, 1000])
ax[1].set_ylim([1.25 - 0.35, 1.25 + 0.35])
ax[1].xaxis.set_tick_params(pad=8)

# Manually create the legend from condition styles, as
# we have no average traces here
lpro = plt.Line2D([0,1],[0,1], linewidth=2, linestyle='-', color=Set1_9.mpl_colors[1])
lanti = plt.Line2D([0,1],[0,1], linewidth=2, linestyle='-', color=Set1_9.mpl_colors[0])
rpro = plt.Line2D([0,1],[0,1], linewidth=2, linestyle=':', color=Set1_9.mpl_colors[1])
ranti = plt.Line2D([0,1],[0,1], linewidth=2, linestyle=':', color=Set1_9.mpl_colors[0])
legend2 = ax[1].legend([lpro, rpro, lanti, ranti], CONDS_TEXT, title='Condition', frameon=False, fontsize=8, loc='lower right')
legend2._legend_box.align='left'

# General settings for all subplots
for axx in ax:
    axx.axvline(0, linestyle=':', linewidth=1.0, color='k', zorder=1)
    axx.grid(False, which='major', axis='x')
    axx.grid(False, which='major', axis='y')
    for axis in ['top','bottom','left','right']:
        axx.spines[axis].set_linewidth(1.0)
        axx.spines[axis].set_color('k')

# Save figure
fig.align_labels()
plt.tight_layout()
plt.subplots_adjust(hspace=0.05)
plt.savefig('fig_gaze.png', bbox_inches='tight', dpi=150)
# -
analysis/example_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Render the cities CSV dataset as an HTML table using pandas.

import pandas as pd

# Input and output locations
csv_file = '../assets/cities.csv'
html_file = '../assets/cities.html'

# Read the data and preview the first rows
cities_df = pd.read_csv(csv_file)
cities_df.head()

# Render the full table to HTML and write it out
html_table = cities_df.to_html(index=False, justify='left', border=0,
                               classes='table', table_id='datatable')
with open(html_file, 'w') as fo:
    fo.write(html_table)
WebVisualizations/Notebooks/csv_to_html_convert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sklearn import tree
from sklearn.datasets import load_iris

# Load the Iris Dataset bundled with scikit-learn
iris = load_iris()

# Fit a decision tree classifier to the full dataset, then report
# its accuracy on the same (training) data
clf = tree.DecisionTreeClassifier().fit(iris.data, iris.target)
clf.score(iris.data, iris.target)
01-Lesson-Plans/19-Supervised-Machine-Learning/3/Activities/01-Ins_DecisionTrees/Solved/Ins_Decision_Trees.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     name: python3
# ---

class ReadStataFile:
    """Reader for a DHS-style Stata export folder.

    The folder (e.g. "HTMR71DT") is expected to contain a .DO file and a
    .DTA file sharing the folder's prefix with "DT" replaced by "FL"
    (e.g. "HTMR71FL.DO" / "HTMR71FL.DTA").  The .DO file supplies
    variable labels and value ("modality") labels; the .DTA file holds
    the data itself.
    """

    def __init__(self, folder_name):
        # folder_name: data folder relative to the working directory
        self.folder_name = folder_name
        self.__load_columns__()

    def __load_columns__(self):
        """Parse the leading `label variable` block of the .DO file.

        Populates self.col_dict ({variable: label}) plus the
        self.variables / self.labels views over it.
        """
        file_name = self.folder_name[0:-2] + "FL"
        # `with` guarantees the handle is closed even if parsing raises
        with open(f"./{self.folder_name}/{file_name}.DO") as obj:
            lines = obj.readlines()
        labels = {}
        for line in lines[2:]:
            # Stop at the first blank line or at the "#delimit ;" marker.
            # readlines() keeps the trailing "\n", so the comparison must be
            # made on the stripped line (the original `line != "#delimit ;"`
            # could never match and crashed on files without a blank line
            # before the marker).
            if line.strip() != "#delimit ;" and len(line.strip()) > 0:
                my_split = line.split('"')
                my_var = my_split[0].replace("label variable", "").strip()
                my_label = my_split[1].strip()
                labels[my_var] = my_label
            else:
                break
        self.variables = labels.keys()
        self.labels = labels.values()
        self.col_dict = labels

    def get_col_modality(self, variable_name):
        """Return {code: label} parsed from the `label define` block
        for variable_name, or an empty dict if it is not found.

        The block is located after the "#delimit" line and is terminated
        by a lone ";" line.
        """
        file_name = self.folder_name[0:-2] + "FL"
        with open(f"./{self.folder_name}/{file_name}.DO") as obj:
            lines = obj.readlines()
        flag_delimiter = 0       # seen the "#delimit" marker yet?
        flag_variable_found = 0  # inside the block for variable_name?
        labels = {}
        for line in lines[2:]:
            if line.lower().strip().find("delimit") > 0:
                flag_delimiter = 1
            elif line.lower().find(variable_name.lower()) > 0 and flag_delimiter == 1:
                flag_variable_found = 1
            elif (flag_variable_found == 1 and flag_delimiter == 1
                    and line.lower().find(variable_name.lower()) == -1
                    and len(line.strip()) > 0 and line.strip() != ";"):
                # A "<code> \"<label>\"" line inside the block
                my_split = line.split('"')
                my_var = int(my_split[0].strip())
                my_label = my_split[1].strip()
                labels[my_var] = my_label
            elif flag_variable_found == 1 and flag_delimiter == 1 and line.strip() == ";":
                # End of the block for this variable
                break
        return labels

    def get_col_label(self, variable_name):
        """Return the human-readable label for a variable, or None if unknown."""
        return self.col_dict.get(variable_name.lower())

    def to_dataframe(self, use_modality=False):
        """Load the .DTA file into a DataFrame with labeled column names.

        When use_modality is True, coded values are additionally replaced
        by their value labels from the .DO file.
        """
        import pandas as pd
        file_name = self.folder_name[0:-2] + "FL"
        df = pd.read_stata(f"./{self.folder_name}/{file_name}.DTA", convert_categoricals=False)
        if use_modality:
            for col in self.variables:
                my_dict = self.get_col_modality(col)
                df[col].replace(my_dict, inplace=True)
        # Columns are always renamed to their labels so downstream code can
        # select by label text
        df.rename(columns=self.col_dict, inplace=True)
        return df


rs = ReadStataFile("HTMR71DT")

df = rs.to_dataframe()
df.head()

# +
cols = ["Beating justified: wife neglects the children",
        "Beating justified: wife argues with husband",
        "Beating justified: wife refuses to have sex with husband",
        "Beating justified: wife burns food"]
# -

df_final = df[cols]

df_final
repo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.1
#     language: julia
#     name: julia-1.6
# ---

# Experiment metadata: working directory is created under $HOME
DATE = "2021-09-11"
TASK = "annealing-correction-L10-K7"
DIR = mkpath("$(homedir())/$(DATE)-$(TASK)")

# +
# Package bootstrap: import each package, installing (registered) or
# dev-ing (git URL) it on first failure.
import Pkg

pkgs = [
    "BioAlignments",
    "BioSequences",
    "Clustering",
#     "CodecZlib",
#     "Colors",
#     "Combinatorics",
    "DataFrames",
    "DataStructures",
    "Dates",
#     "DelimitedFiles",
    "Distances",
#     "Distributions",
#     "EzXML",
    "FASTX",
#     "GFF3",
    "GraphPlot",
#     "HTTP",
#     "Impute",
#     "JSON",
    "Graphs",
#     "LSHFunctions",
#     "Measures",
    "MetaGraphs",
    "https://github.com/cjprybol/Mycelia.git",
#     "NumericIO",
#     "PlotlyJS",
#     "Plots",
    "Primes",
#     "Printf",
    "ProgressMeter",
    "Random",
    "Revise",
    "SparseArrays",
    "Statistics",
    "StatsBase",
    "StatsPlots",
#     "StringDistances",
    "uCSV",
#     "XLSX",
]

# Git URLs (unregistered) are handled separately from registry packages
unregistered_packages = filter(pkg -> occursin(r"(^https|git$)", pkg), pkgs)
registered_packages = setdiff(pkgs, unregistered_packages)

for pkg in registered_packages
    try
        eval(Meta.parse("import $(pkg)"))
    catch
        Pkg.add(pkg)
        Pkg.build(pkg)
        eval(Meta.parse("import $(pkg)"))
    end
end

for pkg_url in unregistered_packages
    pkg_name = replace(basename(pkg_url), ".git" => "")
    try
        eval(Meta.parse("import $(pkg_name)"))
    catch
        Pkg.develop(url=pkg_url)
        Pkg.build(pkg_name)
        eval(Meta.parse("import $(pkg_name)"))
    end
end
# -

# Return the k-mer stored at vertex `kmer_index`, reverse-complemented
# when `orientation` is false (i.e. the walk traverses it in the
# non-canonical direction).
function kmer_index_and_orientation_to_kmer(graph, kmer_index, orientation)
    kmer = graph.vprops[kmer_index][:kmer]
    if !orientation
        kmer = BioSequences.reverse_complement(kmer)
    end
    return kmer
end

# Build a sparse matrix of per-source transition probabilities over the
# k-mer graph: edge counts are first normalized per vertex, then weighted
# by destination-vertex counts, then re-normalized so each nonempty row
# sums to 1 (asserted up to eps).
function determine_edge_probabilities(graph)
    nv = Graphs.nv(graph)
    edge_probabilities = SparseArrays.spzeros(nv, nv)
    for v in 1:nv
        neighbors = Graphs.neighbors(graph, v)
        @assert issorted(neighbors)
        likelihoods = zeros(length(neighbors))
        for (i, neighbor) in enumerate(neighbors)
            # Edges are stored with the smaller vertex first
            if v <= neighbor
                edge = Graphs.Edge(v, neighbor)
            else
                edge = Graphs.Edge(neighbor, v)
            end
            @assert Graphs.has_edge(graph, edge)
            likelihoods[i] = graph.eprops[edge][:count]
        end
        likelihoods = likelihoods ./ sum(likelihoods)
        # @show likelihoods
        # @show neighbors
        for (neighbor, likelihood) in zip(neighbors, likelihoods)
            edge_probabilities[v, neighbor] = likelihood
        end
    end
    # Second pass: weight each row by destination k-mer counts and renormalize
    for source in 1:size(edge_probabilities, 1)
        destinations = findall(edge_probabilities[source, :] .> 0)
        destination_counts = [graph.vprops[dest][:count] for dest in destinations]
        destination_likelihoods = destination_counts ./ sum(destination_counts)
        for (dest, likelihood) in zip(destinations, destination_likelihoods)
            edge_probabilities[source, dest] *= likelihood
        end
        edge_probabilities[source, :] ./= sum(edge_probabilities[source, :])
        @assert abs(1-sum(edge_probabilities[source, :])) <= eps(Float64)
    end
    return edge_probabilities
end

# Walk a vertex path and recover the strand-resolved k-mers along it,
# starting from `opening_kmer`/`opening_orientation`.  Returns
# (vertices, orientations, kmers) or `nothing` if some step has no
# neighbor matching the next path vertex.
# NOTE(review): `closing_orientation` and the `viable_neighbors` /
# `viable_orientations` vectors are currently unused; only the last
# matching neighbor per step is kept.  The @show calls look like leftover
# debugging output — confirm before relying on this in batch runs.
function orient_path(graph, kmers, path, opening_orientation, opening_kmer, closing_orientation)
    vertices = path
    path_orientations = [opening_orientation]
    path_kmers = [opening_kmer]
    @show opening_orientation
    for (i, vertex) in enumerate(vertices[2:end])
        @show i, vertex, last(path_orientations)
        viable_neighbors = typeof(opening_kmer)[]
        viable_neighbor = nothing
        viable_orientations = Bool[]
        viable_orientation = nothing
        for neighbor in BioSequences.neighbors(last(path_kmers))
            @show "considering going from $(last(path_kmers)) to $(neighbor)"
            # @show "considering going from $(last(path_kmers)) to $(neighbor)"
            canonical_neighbor = BioSequences.canonical(neighbor)
            canonical_neighbor_index_range = searchsorted(kmers, canonical_neighbor)
            # @show canonical_neighbor_index_range
            if length(canonical_neighbor_index_range) == 1
                @show canonical_neighbor_index = first(canonical_neighbor_index_range)
                if canonical_neighbor_index == vertex
                    @show "hit, selecting $neighbor"
                    viable_neighbor = neighbor
                    viable_orientation = neighbor == canonical_neighbor
                end
            end
        end
        if (viable_neighbor != nothing) && (viable_orientation != nothing)
            push!(path_kmers, viable_neighbor)
            push!(path_orientations, viable_orientation)
        else
            return nothing
            # not a viable path, need to look towards the next miss
        end
    end
    return vertices, path_orientations, path_kmers
end

# Perform a likelihood-weighted random walk of up to `walk_length` k-mers
# starting from `kmer`.  At each step the candidate neighbors are those
# whose canonical form exists in `kmers` and is connected to the current
# vertex; multi-way choices are sampled with `edge_likelihoods` weights.
# Returns early (shorter walk) at a dead end.
function take_a_walk(graph, edge_likelihoods, kmers, kmer, walk_length)
    walk = [kmer]
    # `kmers` is sorted, so searchsorted gives the canonical k-mer's vertex id
    current_kmer_indices = searchsorted(kmers, BioSequences.canonical(last(walk)))
    @assert length(current_kmer_indices) == 1
    current_kmer_index = first(current_kmer_indices)
    while length(walk) < walk_length
        viable_neighbors = Tuple{Int, Bool}[]
        for neighbor in BioSequences.neighbors(kmer)
            canonical_neighbor = BioSequences.canonical(neighbor)
            neighbor_is_canonical = neighbor == canonical_neighbor
            neighbor_vertex_range = searchsorted(kmers, canonical_neighbor)
            if !isempty(neighbor_vertex_range)
                neighbor_vertex = first(neighbor_vertex_range)
                if Graphs.has_edge(graph, current_kmer_index, neighbor_vertex)
                    push!(viable_neighbors, (neighbor_vertex, neighbor_is_canonical))
                end
            end
        end
        if isempty(viable_neighbors)
            # Dead end: return the partial walk
            return walk
        elseif length(viable_neighbors) == 1
            chosen_neighbor = first(viable_neighbors)
        else
            viable_neighbor_indices = first.(viable_neighbors)
            step_likelihoods = StatsBase.weights(edge_likelihoods[current_kmer_index, viable_neighbor_indices])
            chosen_neighbor = StatsBase.sample(viable_neighbors, step_likelihoods)
        end
        kmer = kmer_index_and_orientation_to_kmer(graph, chosen_neighbor...)
        push!(walk, kmer)
        current_kmer_indices = searchsorted(kmers, BioSequences.canonical(last(walk)))
        @assert length(current_kmer_indices) == 1
        current_kmer_index = first(current_kmer_indices)
    end
    return walk
end

# +
# create a random genome
# -

L = 10

k = 7

# Seeded random DNA sequence of length L (seeded with L for reproducibility)
genome = BioSequences.randdnaseq(Random.seed!(L), L)

# Canonical k-mer -> count, sorted so vertex ids align with sorted k-mers
kmer_counts = sort!(Mycelia.count_canonical_kmers(BioSequences.BigDNAMer{k}, genome))

K = length(keys(kmer_counts))

# +
# create an undirected kmer graph from the sequence
# -

graph = MetaGraphs.MetaGraph(K)

# One vertex per canonical k-mer, annotated with the k-mer and its count
for (i, (kmer, count)) in enumerate(kmer_counts)
    @show i, kmer, count
    @show MetaGraphs.set_prop!(graph, i, :kmer, kmer)
    @show MetaGraphs.set_prop!(graph, i, :count, count)
end

graph.vprops

kmers = collect(keys(kmer_counts))

# Add one edge per adjacent k-mer pair in the genome, accumulating counts
for i in 1:length(genome)-k
    edge_range = i:i+k
    edge = genome[edge_range]
    src = BioSequences.BigDNAMer{k}(edge[1:end-1])
    dst = BioSequences.BigDNAMer{k}(edge[2:end])
    canonical_src = BioSequences.canonical(src)
    canonical_dst = BioSequences.canonical(dst)
    src_index = Mycelia.get_kmer_index(kmers, canonical_src)
    dst_index = Mycelia.get_kmer_index(kmers, canonical_dst)
    @show edge
    @show src_index, src == canonical_src, dst_index, dst == canonical_dst
    graph_edge = Graphs.Edge(src_index, dst_index)
    if Graphs.has_edge(graph, graph_edge)
        current_count = graph.eprops[graph_edge][:count]
        MetaGraphs.set_prop!(graph, graph_edge, :count, current_count+1)
    else
        Graphs.add_edge!(graph, graph_edge)
        MetaGraphs.set_prop!(graph, graph_edge, :count, 1)
    end
end

graph

graph.eprops

# nodesize = [rand(1:2) for i in 1:Graphs.nv(graph)]
nodelabel = [graph.vprops[v][:kmer] for v in 1:Graphs.nv(graph)]
GraphPlot.gplot(graph, nodelabel=nodelabel)

edge_likelihoods = determine_edge_probabilities(graph)

# +
# take yen_k shortest paths
# -

genome_kmers = collect(BioSequences.each(BioSequences.BigDNAMer{k}, genome))

# +
# make a SNP edit to the genome
# -

# Pick one of the pre-tried seeds for the simulated read
# seed = 0
# seed = 1
# seed = 2
# seed = 3
# seed = 4
# seed = 5
# seed = 6
# seed = 7
# seed = 8
seed = 9

@show seed
Random.seed!(seed) observation = Mycelia.observe(genome, error_rate = 0.1) alignment = BioAlignments.pairalign(BioAlignments.LevenshteinDistance(), observation, genome) reverse_alignment = BioAlignments.pairalign(BioAlignments.LevenshteinDistance(), BioSequences.reverse_complement(observation), genome) if reverse_alignment.value < alignment.value observation = BioSequences.reverse_complement(observation) alignment = reverse_alignment @show "flipping" end # + # convert genome into stranded path # - # function sequence_to_ observation_as_oriented_kmers = [] observation_kmers = collect(BioSequences.each(BioSequences.BigDNAMer{k}, observation)) genome_kmers is_canonical = falses(length(observation_kmers)) kmer_index = zeros(Int, length(observation_kmers)) for (i, kmer) in enumerate(observation_kmers) # is_canonical[i] = kmer.fw <= kmer.bw canonical_kmer = BioSequences.canonical(kmer.fw) is_canonical[i] = kmer.fw == canonical_kmer kmer_index_range = searchsorted(kmers, canonical_kmer) if length(kmer_index_range) > 1 @error "bad" elseif isempty(kmer_index_range) # do nothing, index is 0 to indicate not found else kmer_index[i] = first(kmer_index_range) end end is_canonical kmer_index # + proposed_path = BioSequences.BigDNAMer{k}[] opening_missing_kmer_path_index = findfirst(x -> x == 0, kmer_index) # read is solid! 
if opening_missing_kmer_path_index == nothing # proposed_seq = observation proposed_path = [kmer.fw for kmer in observation_kmers] @show "perfect read" elseif all(index -> index == 0, kmer_index) @show "read is trash" # generate a new read from the assembly to replace this one observation_path_length = length(observation_kmers) proposed_path = nothing proposed_path_size_discrepancy = Inf starting_kmer_indices = StatsBase.sample(1:Graphs.nv(graph), min(10, Graphs.nv(graph)), replace=false) for starting_kmer_index in starting_kmer_indices starting_kmer_index = rand(1:Graphs.nv(graph)) @show starting_kmer_index starting_kmer = graph.vprops[starting_kmer_index][:kmer] if rand(Bool) starting_kmer = BioSequences.reverse_complement(starting_kmer) end candidate_path = take_a_walk(graph, edge_likelihoods, kmers, starting_kmer, observation_path_length) discrepancy = abs(length(candidate_path) - observation_path_length) @show discrepancy if discrepancy < proposed_path_size_discrepancy proposed_path = candidate_path proposed_path_size_discrepancy = discrepancy if proposed_path_size_discrepancy == 0 break end end end else if opening_missing_kmer_path_index == 1 opening_solid_kmer = nothing # closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), opening_missing_kmer_path_index+1) elseif opening_missing_kmer_path_index != nothing opening_solid_kmer_path_index = opening_missing_kmer_path_index - 1 opening_solid_kmer = observation_kmers[opening_solid_kmer_path_index].fw end if opening_missing_kmer_path_index == nothing closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), 1) else closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), opening_missing_kmer_path_index+1) end if closing_solid_kmer_path_index == nothing @show "end is open" closing_solid_kmer = nothing else closing_solid_kmer = observation_kmers[closing_solid_kmer_path_index].fw end iterations = 0 while (opening_missing_kmer_path_index != nothing) && (iterations < 5) 
iterations += 1 @show iterations if (opening_solid_kmer == nothing) && (closing_solid_kmer != nothing) @show "missing opening" @show closing_solid_kmer inverted_closing_solid_kmer = BioSequences.reverse_complement(closing_solid_kmer) @show inverted_closing_solid_kmer walk_length = closing_solid_kmer_path_index chosen_walk = take_a_walk(graph, edge_likelihoods, kmers, inverted_closing_solid_kmer, walk_length) chosen_walk = reverse(BioSequences.reverse_complement.(chosen_walk)) @show chosen_walk @show closing_solid_kmer_path_index @assert observation_kmers[closing_solid_kmer_path_index].fw == last(chosen_walk) proposed_path = chosen_walk opening_missing_kmer_path_index = findnext(map(x -> x == 0, kmer_index), closing_solid_kmer_path_index+1) if opening_missing_kmer_path_index == 1 opening_solid_kmer = nothing elseif opening_missing_kmer_path_index != nothing opening_solid_kmer_path_index = opening_missing_kmer_path_index - 1 opening_solid_kmer = observation_kmers[opening_solid_kmer_path_index].fw closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), opening_missing_kmer_path_index+1) if closing_solid_kmer_path_index == nothing closing_solid_kmer = nothing else closing_solid_kmer = observation_kmers[closing_solid_kmer_path_index].fw end end end if (opening_solid_kmer != nothing) && (closing_solid_kmer == nothing) @show "missing closing" walk_length = length(observation_kmers) - opening_solid_kmer_path_index+1 chosen_walk = take_a_walk(graph, edge_likelihoods, kmers, opening_solid_kmer, walk_length) @show chosen_walk @assert observation_kmers[opening_solid_kmer_path_index].fw == first(chosen_walk) proposed_path = getproperty.(observation_kmers[length(proposed_path)+1:opening_solid_kmer_path_index], :fw) # proposed_path = append!(proposed_path, chosen_walk[2:end]) opening_missing_kmer_path_index = nothing end while (opening_solid_kmer != nothing) && (closing_solid_kmer != nothing) @show "bubble!" 
@show opening_solid_kmer @show closing_solid_kmer opening_solid_kmer_index = kmer_index[opening_solid_kmer_path_index] closing_solid_kmer_index = kmer_index[closing_solid_kmer_path_index] ##################################################################################### # DETERMINE PATH LENGTH ##################################################################################### # by default, we're assuming that we want to walk the same distance as our bubble + 1 bubble_path_length = closing_solid_kmer_path_index - opening_solid_kmer_path_index + 1 if opening_solid_kmer_index != closing_solid_kmer_index path = Graphs.a_star(graph, opening_solid_kmer_index, closing_solid_kmer_index) normalized_path = Int[path[1].src, [edge.dst for edge in path]...] shortest_paths = [normalized_path] else neighbors = Graphs.neighbors(graph, opening_solid_kmer_index) shortest_paths = Vector{Vector{Int}}() for neighbor in neighbors path = Graphs.a_star(graph, neighbor, closing_solid_kmer_index) normalized_path = Int[path[1].src, [edge.dst for edge in path]...] push!(shortest_paths, normalized_path) end end # but if our bubble is a deletion, then walking that length may not get us across to the other side # so we also determine the longest, shortest path longest_shortest_walk_length = maximum(length.(shortest_paths)) longest_walk_length = max(bubble_path_length, longest_shortest_walk_length) walk_length = Int(ceil(longest_walk_length * 1.1)) walks = Vector{Int}[] for i in 1:3 println("round $i") walks = [take_a_walk(graph, edge_likelihoods, kmers, opening_solid_kmer, walk_length)] while allunique(walks) push!(walks, take_a_walk(graph, edge_likelihoods, kmers, opening_solid_kmer, walk_length)) end @show walks walks = filter(path -> closing_solid_kmer in path, walks) @show walks if isempty(walks) walk_length = Int(ceil(walk_length * 1.1)) else # done! 
break end end if !isempty(walks) # selected_path = rand(walks) @show candidate_paths = sort(collect(StatsBase.countmap(walks)), by=x->x[2], rev=true) selected_path = first(first(candidate)paths) # first() @show selected_path selected_path_closing_solid_kmer_intersects = findall(kmer -> kmer == closing_solid_kmer, selected_path) if length(selected_path_closing_solid_kmer_intersects) > 1 selected_path_closing_solid_kmer_intersect = 0 d = Inf for i in selected_path_closing_solid_kmer_intersects candidate_distance = abs(i - walk_length) if candidate_distance < d d = candidate_distance selected_path_closing_solid_kmer_intersect = i end end else selected_path_closing_solid_kmer_intersect = first(selected_path_closing_solid_kmer_intersects) end selected_path = selected_path[1:selected_path_closing_solid_kmer_intersect] @show selected_path proposed_path = getproperty.(observation_kmers[length(proposed_path)+1:opening_solid_kmer_path_index], :fw) @show proposed_path append!(proposed_path, selected_path[2:end]) @show proposed_path opening_missing_kmer_path_index = findnext(map(x -> x == 0, kmer_index), closing_solid_kmer_path_index+1) @show opening_missing_kmer_path_index if opening_missing_kmer_path_index == nothing #done opening_solid_kmer = nothing elseif opening_missing_kmer_path_index == 1 opening_solid_kmer = nothing elseif opening_missing_kmer_path_index != nothing opening_solid_kmer_path_index = opening_missing_kmer_path_index - 1 opening_solid_kmer = observation_kmers[opening_solid_kmer_path_index].fw closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), opening_missing_kmer_path_index+1) if closing_solid_kmer_path_index == nothing closing_solid_kmer = nothing else closing_solid_kmer = observation_kmers[closing_solid_kmer_path_index].fw end end else @show "need to bump out" if opening_missing_kmer_path_index >= 2 opening_missing_kmer_path_index -= 1 opening_solid_kmer = nothing # this should now break out to larger while loop end # push out closing 
node and try again closing_solid_kmer_path_index = findnext(map(x -> x != 0, kmer_index), closing_solid_kmer_path_index+1) if closing_solid_kmer_path_index == nothing @show "end is open" closing_solid_kmer = nothing else closing_solid_kmer = observation_kmers[closing_solid_kmer_path_index].fw end end end end if closing_solid_kmer_path_index != nothing append!(proposed_path, getproperty.(observation_kmers[closing_solid_kmer_path_index+1:end], :fw)) end end # - proposed_seq = BioSequences.LongDNASeq(first(proposed_path)) for kmer in proposed_path[2:end] push!(proposed_seq, last(kmer)) end proposed_seq new_alignment = BioAlignments.pairalign(BioAlignments.LevenshteinDistance(), proposed_seq, genome) reverse_new_alignment = BioAlignments.pairalign(BioAlignments.LevenshteinDistance(), BioSequences.reverse_complement(proposed_seq), genome) if reverse_new_alignment.value < new_alignment.value # observation = BioSequences.reverse_complement!(observation) new_alignment = reverse_new_alignment @show "flipping" end if new_alignment.value <= alignment.value @show "newer is equivalent or better" @show new_alignment.value, alignment.value else @show "newer is worse, keep original" @show new_alignment.value, alignment.value end
docs/_src/5.Development/2021-09-11-annealing-correction-L10-K7.ipynb
# ##### Copyright 2020 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # simple_min_cost_flow_program # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/graph/simple_min_cost_flow_program.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/ortools/graph/samples/simple_min_cost_flow_program.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010-2018 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# [START program]
"""From <NAME>., 'Applied Mathematical Programming', figure 8.1."""
# [START import]
from ortools.graph import pywrapgraph
# [END import]

"""MinCostFlow simple interface example."""
# [START data]
# The network is encoded as four parallel arrays with one entry per arc:
# its tail node, head node, capacity, and cost per unit of flow.
# For instance, the first entries describe an arc 0 -> 1 with capacity 15.
start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4]
end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2]
capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5]
unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3]

# Net supply (positive) or demand (negative) at each node; totals zero.
supplies = [20, 0, 0, -5, -15]
# [END data]

# [START constraints]
# Build the solver and feed it every arc and every node supply.
smcf = pywrapgraph.SimpleMinCostFlow()

for tail, head, capacity, unit_cost in zip(start_nodes, end_nodes,
                                           capacities, unit_costs):
    smcf.AddArcWithCapacityAndUnitCost(tail, head, capacity, unit_cost)

for node, supply in enumerate(supplies):
    smcf.SetNodeSupply(node, supply)
# [END constraints]

# [START solve]
# Run the optimization.
solve_status = smcf.Solve()
# [END solve]

# [START print_solution]
if solve_status != smcf.OPTIMAL:
    print('Solving the min cost flow problem failed. Solver status: ', solve_status)
else:
    # Report the optimal objective followed by the per-arc flow assignment.
    print('Minimum cost: ', smcf.OptimalCost())
    print('')
    print(' Arc Flow / Capacity Cost')
    for arc in range(smcf.NumArcs()):
        arc_cost = smcf.Flow(arc) * smcf.UnitCost(arc)
        print('%1s -> %1s %3s / %3s %3s' % (smcf.Tail(arc), smcf.Head(arc), smcf.Flow(arc), smcf.Capacity(arc), arc_cost))
# [END print_solution]
examples/notebook/graph/simple_min_cost_flow_program.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 整数索引 import pandas as pd import numpy as np ser = pd.Series(np.arange(3.)) ser ser2 = pd.Series(np.arange(3.), index=['a', 'b', 'c']) ser2[-1] ser[:1] ser.loc[:1] ser.iloc[:1] ser.loc[1:] ser.iloc[1:] # ## 算数运算和数据对齐 s1 = pd.Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e']) s2 = pd.Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g']) s1 s2 s1 + s2 # 自动的数据对齐操作在不重叠的索引出引入了NA值.缺失值会在算数运算过程中传播 # ##### 对于DataFrame,对齐操作会同时发生在行和列上,把他们相加后会返回一个新的DataFrame,其索引和列为原来那两个DataFrame的并集 df1 = pd.DataFrame({'A': [1, 2]}) df2 = pd.DataFrame({'B': [3, 4]}) df1 df2 df1 - df2 # #### 在算数方法中填充值 df1 = pd.DataFrame(np.arange(12.).reshape(3, 4), columns=list('abcd')) df1
Three_Part_Moudule/Pandas/Pandas_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Environmental Mapping Playground import os import geemap import numpy as np import geopandas as gpd nh_center = [43.78699687528447, -71.51658744025995] nh_zoom = 8 # + # m1 = geemap.Map(center=nh_center, zoom=nh_zoom) # m1.add_basemap("HYBRID") # hydro_url = "https://basemap.nationalmap.gov/arcgis/services/USGSHydroCached/MapServer/WMSServer?" # m1.add_wms_layer(url=hydro_url, # layers="0", # Find all options in XML # name="USGS Hydro", # format="image/png", # shown=True) # m1 # + m = geemap.Map(center=nh_center, zoom=nh_zoom) m.add_basemap("HYBRID") m.add_basemap("NLCD 2016 CONUS Land Cover") m # - # ## NH Trails # + filepath = '/Users/heatherkusmierz/Documents/Files/Courses/Geography_Cartography/NH_Trails/GRANIT_20220201162014/' points_path = os.path.join(filepath, 'nhtrails_points.shp') lines_path = os.path.join(filepath, 'nhtrails.shp') # - nh_trails_pts = gpd.read_file(points_path) nh_trails = gpd.read_file(lines_path) nh_trails_pts.head() nh_trails_pts.info() nh_trails_pts.crs nh_trails.head() nh_trails.info() nh_trails.crs pub_land_url = "https://opendata.arcgis.com/datasets/ac1d1c9b7fb548dcaaa6bdb5c80b70d5_6.geojson" # + m2 = geemap.Map(center=nh_center, zoom=nh_zoom, ee_initialize=False) m2.add_basemap("HYBRID") m2 # - style = { "stroke": True, "color": "#000000", "weight": 2, "opacity": 1, "fill": True, "fillColor": "#0000ff", "fillOpacity": 0.4, } m2.add_geojson(pub_land_url, style=style, layer_name="NH Public/Cnsvn Land")
Env_Mapping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TensorFlow Dataset API # # **Learning Objectives** # 1. Learn how use tf.data to read data from memory # 1. Learn how to use tf.data in a training loop # 1. Learn how use tf.data to read data from disk # 1. Learn how to write production input pipelines with feature engineering (batching, shuffling, etc.) # # # In this notebook, we will start by refactoring the linear regression we implemented in the previous lab so that is takes its data from a`tf.data.Dataset`, and we will learn how to implement **stochastic gradient descent** with it. In this case, the original dataset will be synthetic and read by the `tf.data` API directly from memory. # # In a second part, we will learn how to load a dataset with the `tf.data` API when the dataset resides on disk. # + import json import math import os from pprint import pprint import numpy as np import tensorflow as tf print(tf.version.VERSION) # - # ## Loading data from memory # ### Creating the dataset # Let's consider the synthetic dataset of the previous section: N_POINTS = 10 X = tf.constant(range(N_POINTS), dtype=tf.float32) Y = 2 * X + 10 # We begin with implementing a function that takes as input # # # - our $X$ and $Y$ vectors of synthetic data generated by the linear function $y= 2x + 10$ # - the number of passes over the dataset we want to train on (`epochs`) # - the size of the batches the dataset (`batch_size`) # # and returns a `tf.data.Dataset`: # **Remark:** Note that the last batch may not contain the exact number of elements you specified because the dataset was exhausted. 
# #
# If you want batches with the exact same number of elements per batch, we will have to discard the last batch by
# setting:
#
# ```python
# dataset = dataset.batch(batch_size, drop_remainder=True)
# ```
#
# We will do that here.


def create_dataset(X, Y, epochs, batch_size):
    """Build a tf.data.Dataset over the (X, Y) pairs.

    Args:
        X: 1-D tensor of inputs.
        Y: 1-D tensor of targets, same length as X.
        epochs: number of passes over the data.
        batch_size: examples per batch; the trailing partial batch is
            dropped so every batch has exactly `batch_size` elements.

    Returns:
        A `tf.data.Dataset` yielding (x_batch, y_batch) tuples.
    """
    dataset = tf.data.Dataset.from_tensor_slices((X, Y))
    # drop_remainder=True discards the final short batch, as explained above.
    dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
    return dataset


# Let's test our function by iterating twice over our dataset in batches of 3 datapoints:

# +
BATCH_SIZE = 3
EPOCH = 2

# BUG FIX: the original called create_dataset(X, Y, epochs=1, batch_size=3),
# which iterated only ONCE and ignored the constants defined just above,
# contradicting the "iterating twice" description.
dataset = create_dataset(X, Y, epochs=EPOCH, batch_size=BATCH_SIZE)

for i, (x, y) in enumerate(dataset):
    print("x:", x.numpy(), "y:", y.numpy())
    assert len(x) == BATCH_SIZE
    assert len(y) == BATCH_SIZE

# With 10 points, batches of 3 and the remainder dropped, each epoch yields
# 3 full batches, so EPOCH epochs yield EPOCH * 3 batches in total.
# (The original `assert EPOCH` only checked that EPOCH was truthy.)
assert i + 1 == EPOCH * (N_POINTS // BATCH_SIZE)
# -

# ### Loss function and gradients

# The loss function and the function that computes the gradients are the same as before:

# +
def loss_mse(X, Y, w0, w1):
    """Mean squared error of the linear model w0 * X + w1 against Y."""
    Y_hat = w0 * X + w1
    errors = (Y_hat - Y) ** 2
    return tf.reduce_mean(errors)


def compute_gradients(X, Y, w0, w1):
    """Return [d(loss)/d(w0), d(loss)/d(w1)] for the MSE loss above."""
    with tf.GradientTape() as tape:
        loss = loss_mse(X, Y, w0, w1)
    return tape.gradient(loss, [w0, w1])
# -

# ### Training loop

# The main difference now is that now, in the training loop, we will iterate directly on the `tf.data.Dataset` generated by our `create_dataset` function.
#
# We will configure the dataset so that it iterates 250 times over our synthetic dataset in batches of 2.
# + EPOCHS = 250 BATCH_SIZE = 2 LEARNING_RATE = 0.02 MSG = "STEP {step} - loss: {loss}, w0: {w0}, w1: {w1}\n" w0 = tf.Variable(0.0) w1 = tf.Variable(0.0) dataset = create_dataset(X, Y, epochs=EPOCHS, batch_size=BATCH_SIZE) for step, (X_batch, Y_batch) in enumerate(dataset): dw0, dw1 = compute_gradients(X_batch, Y_batch, w0, w1) w0.assign_sub(dw0 * LEARNING_RATE) w1.assign_sub(dw1 * LEARNING_RATE) if step % 100 == 0: loss = loss_mse(X_batch, Y_batch, w0, w1) print(MSG.format(step=step, loss=loss, w0=w0.numpy(), w1=w1.numpy())) assert loss < 0.0001 assert abs(w0 - 2) < 0.001 assert abs(w1 - 10) < 0.001 # - # ## Loading data from disk # ### Locating the CSV files # # We will start with the **taxifare dataset** CSV files that we wrote out in a previous lab. # # The taxifare datast files been saved into `../data`. # # Check that it is the case in the cell below, and, if not, regenerate the taxifare # dataset by running the provious lab notebook: # !ls -l ../data/taxi*.csv # ### Use tf.data to read the CSV files # # The `tf.data` API can easily read csv files using the helper function # # [tf.data.experimental.make_csv_dataset](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset) # # # If you have TFRecords (which is recommended), you may use # # [tf.data.experimental.make_batched_features_dataset](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_batched_features_dataset) # The first step is to define # # - the feature names into a list `CSV_COLUMNS` # - their default values into a list `DEFAULTS` CSV_COLUMNS = [ "fare_amount", "pickup_datetime", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "key", ] LABEL_COLUMN = "fare_amount" DEFAULTS = [[0.0], ["na"], [0.0], [0.0], [0.0], [0.0], [0.0], ["na"]] # Let's now wrap the call to `make_csv_dataset` into its own function that will take only the file pattern (i.e. 
glob) where the dataset files are to be located:

# +
def create_dataset(pattern):
    """Return a tf.data dataset parsing all CSV files matching `pattern`.

    Uses the module-level CSV_COLUMNS/DEFAULTS schema and a batch size of 1,
    so each element is an OrderedDict of column name -> shape-(1,) tensor.
    """
    return tf.data.experimental.make_csv_dataset(
        pattern, 1, CSV_COLUMNS, DEFAULTS
    )


tempds = create_dataset("../data/taxi-train*")
print(tempds)
# -

# Note that this is a prefetched dataset, where each element is an `OrderedDict` whose keys are the feature names and whose values are tensors of shape `(1,)` (i.e. vectors).
#
# Let's iterate over the first two elements of this dataset using `dataset.take(2)`, converting each to an ordinary Python dictionary with numpy arrays as values for more readability:

for data in tempds.take(2):
    pprint({k: v.numpy() for k, v in data.items()})
    print("\n")

# ### Transforming the features

# What we really need is a dictionary of features + a label. So, we have to do two things to the above dictionary:
#
# 1. Remove the unwanted column "key"
# 1. Keep the label separate from the features
#
# Let's first implement a function that takes as input a row (represented as an `OrderedDict` in our `tf.data.Dataset` as above) and then returns a tuple with two elements:
#
# * The first element being the same `OrderedDict` with the label dropped
# * The second element being the label itself (`fare_amount`)
#
# Note that we will need to also remove the `key` and `pickup_datetime` column, which we won't use.
# + UNWANTED_COLS = ["pickup_datetime", "key"] def features_and_labels(row_data): label = row_data.pop(LABEL_COLUMN) features = row_data for unwanted_col in UNWANTED_COLS: features.pop(unwanted_col) return features, label # - # Let's iterate over 2 examples from our `tempds` dataset and apply our `feature_and_labels` # function to each of the examples to make sure it's working: for row_data in tempds.take(2): features, label = features_and_labels(row_data) pprint(features) print(label, "\n") assert UNWANTED_COLS[0] not in features.keys() assert UNWANTED_COLS[1] not in features.keys() assert label.shape == [1] # ### Batching # Let's now refactor our `create_dataset` function so that it takes an additional argument `batch_size` and batch the data correspondingly. We will also use the `features_and_labels` function we implemented in order for our dataset to produce tuples of features and labels. def create_dataset(pattern, batch_size): dataset = tf.data.experimental.make_csv_dataset( pattern, batch_size, CSV_COLUMNS, DEFAULTS ) return dataset.map(features_and_labels) # Let's test that our batches are of the right size: # + BATCH_SIZE = 2 tempds = create_dataset("../data/taxi-train*", batch_size=2) for X_batch, Y_batch in tempds.take(2): pprint({k: v.numpy() for k, v in X_batch.items()}) print(Y_batch.numpy(), "\n") assert len(Y_batch) == BATCH_SIZE # - # ### Shuffling # # When training a deep learning model in batches over multiple workers, it is helpful if we shuffle the data. That way, different workers will be working on different parts of the input file at the same time, and so averaging gradients across workers will help. Also, during training, we will need to read the data indefinitely. # Let's refactor our `create_dataset` function so that it shuffles the data, when the dataset is used for training. 
# # We will introduce a additional argument `mode` to our function to allow the function body to distinguish the case # when it needs to shuffle the data (`mode == "train"`) from when it shouldn't (`mode == "eval"`). # # Also, before returning we will want to prefetch 1 data point ahead of time (`dataset.prefetch(1)`) to speedup training: def create_dataset(pattern, batch_size=1, mode="eval"): dataset = tf.data.experimental.make_csv_dataset( pattern, batch_size, CSV_COLUMNS, DEFAULTS ) dataset = dataset.map(features_and_labels).cache() if mode == "train": dataset = dataset.shuffle(1000).repeat() # take advantage of multi-threading; 1=AUTOTUNE dataset = dataset.prefetch(1) return dataset # Let's check that our function work well in both modes: tempds = create_dataset("../data/taxi-train*", 2, "train") print(list(tempds.take(1))) tempds = create_dataset("../data/taxi-valid*", 2, "eval") print(list(tempds.take(1))) # In the next notebook, we will build the model using this input pipeline. # Copyright 2021 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
notebooks/introduction_to_tensorflow/solutions/2a_dataset_api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WUM 2020: Projekt II, Milestone I # ### <NAME>, <NAME> & <NAME> # ## Wstępna eksploracja # **Online Shoppers Purchasing Intention** to prawdopodobnie fascynujący zbiór danych zawierający informacje o wizytach na pewnym sklepie internetowym i ich właściwościach. Ale czy aby na pewno nazwa datasetu nie jest myląca i tak naprawdę nie chodzi w nim o coś zupełnie innego? # Sprawdźmy to odczytując naszą ramkę! # # Zbiór nie jest zbyt duży - jego *.csv* waży zaledwie 1 MB. import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import random import warnings warnings.simplefilter('ignore') df = pd.read_csv("online_shoppers_intention.csv") df.shape random.seed(123) df.sample(10) # Nasz dataset składa się z 12 330 obserwacji, każda zawierająca w sobie 18 informacji. Jak możemy przeczytać w dokumentacji frame, poszczególne kolumny reprezentują kolejno: # # * *Administrative*, *Informational*, *ProductRelated* - liczba stron danych kategorii (administracyjne, informacyjne, związane z produktem) odwiedzonych podczas sesji przez użytkownika, którego dotyczy dana obserwacja, # * *Administrative_Duration*, *Informational_Duration*, *ProductRelated_Duration* - całkowite czasy spędzone na stronach o kategoriach wyżej (w sekundach), # * *BounceRates*, pl: współczynnik odrzuceń - procent stron odwiedzonych podczas sesji, po których nie następuje przekierowanie do kolejnych. Miara ta jest wyliczana przez *Google Analytics*, # * *ExitRates*, pl: współczynnik wyjść - stosunek osób, które opuściły serwis z danej podstrony do wszystkich, które ją odwiedziły (także wyznaczona przez *Google Analytics), # * *PageValue* - średnia wartość strony internetowej, którą odwiedził użytkownik przed zrealizowaniem transakcji e-commerce. 
Informuje, które konkretne strony witryny mają największą wartość. Na przykład strona produktu w witrynie e-commerce będzie zwykle miała wyższą wartość strony niż strona zasobu, # * *SpecialDay* - liczba mówiąca o bliskości czasu odwiedzin strony do określonego dnia specjalnego (np. <NAME>, Walentynki), w którym sesje bardziej prawdopodobnie zostaną sfinalizowane transakcją. Wartość tego atrybutu określa się, biorąc pod uwagę dynamikę handlu elektronicznego, taką jak czas między datą zamówienia a datą dostawy. Na przykład dla Walentynek ta wartość przyjmuje niezerową wartość między 2. lutego a 12. lutego, zero przed tą datą i po tej dacie, chyba że zbliża się kolejny specjalny dzień, a jej maksymalna wartość przychodzi na 8. lutego, # * *Month* - miesiąc, # * *OperatingSystems* - id używanego systemu operacyjnego, # * *Browser* - identyfikator przeglądarki internetowej, # * *Region* - informacja o regionie surfowania po internecie, # * *TrafficType* - id typu przeglądania sieci, # * *Returning_Visitor* - odpowiedź na pytanie czy użytkownik odwiedzał już stronę czy jest na niej po raz pierwszy, # * *Weekend* - jak sama nazwa wskazuje, # * *Revenue* - informacja o tym czy przeglądanie oferty zakończyło się zakupem. # Oczywiście jakbyśmy modelowali algorytm uczenia nadzorowanego, najmądrzej byłoby użyć 17 zmiennych do przewidzenia czy ogląd strony zakończy się transakcją - wobec tego *revenue* byłoby oczywistą zmienną celu. # W informacjach o naszej ramce możemy także przeczytać, że wszystkie wiersze dotyczą innych klientów, co w efekcie skutkuje lepszą jakością danych - zauważmy na przykład, że gdyby niektóre obserwacje dotyczyły tej samej osoby, to biorąc pod uwagę indywidualne podejście każdego użytkownika, a także specyficzne nabyte nawyki, wnioski mogłyby być przekłamane ze względu na wzmożoną uwagę tych właśnie osobników manipulującą reprezentatywne wartości dla ogółu społeczeństwa. 
# ## Eksploracja zbiorcza # ### Typy danych i braki # Sprawdźmy ile wierszy ma jakiekolwiek braki, które należałoby imputować. len(df.dropna()) - len(df) # Co za szczęście! Kompletne dane! Jak prezentują się typy danych? df.dtypes # Według opisu zbiór danych składa się z 10 atrybutów numerycznych i 8 kategorii. Jednak zmienne *OperatingSystems*, *Browser*, *Region* i *TrafficType* są przedstawione jako identyfikatory liczbowe. Lepiej będzie zmienić je na faktory, gdyż bez sensu porównywać je między sobą numerycznie. df['OperatingSystems'] = df['OperatingSystems'].astype(object) df['Browser'] = df['Browser'].astype(object) df['Region'] = df['Region'].astype(object) df['TrafficType'] = df['TrafficType'].astype(object) df.dtypes # Miesiące także możemy przedstawić jako numery - wciąż będziemy je rozróżniać jako zmienne kategoryczne, lecz dzięki temu ich obraz będzie bardziej przystępny, a także ramka będzie ważyć mniej i szybciej się przetwarzać (każda kategoria będzie krótsza). df['Month'].unique() # O proszę! Nie ma żadnych obserwacji ze stycznia i kwietnia. months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] for i in range(len(months)): df[['Month']] = df[['Month']].replace(months[i], i + 1) df[['Month']].sample(5) df['Month'] = df['Month'].astype(object) # ### Rozkłady zmiennych numerycznych # Przyjrzyjmy się typowym wartościom tabeli. df.describe() # Co możem wysnuć z ramki średnich minimów, maximów, odchyleń stadardowych i kwartyli? # # 1. Użytkownicy najwięcej czasu spędzili na stronach związanych z produktem - zwyklee kilkanaście minut, odwiedzając standardowo paręnaście do kilkudziesięciu takich witryn. O wiele mniej w przypadku platorm administracyjnych i informacyjnych - tam najczęściej blisko minuty, po parę adresów. Jeden użytkownik spędził prawie 20 godzin na stronach powiązanych z produktem!! Czy to outlier, który warto by było usunąć? Raczej nie, szaleńcy są wśród nas, # 2. 
Trzeba mieć na uwadze, że współczynniki odrzuceń i wyjść oscylują wokół kilku procent, # 3. Typowy wskaźnik *SpecialDay* to także kilka procent (tu 6), lecz wariancja jest już nieco większa i wynosi 0.2. # 4. Użytkownicy zdecydowanie najczęściej używają systemu operacyjnego i przeglądarki o id 1 i 2, choć tych pierwszych w bazie różnych jest aż 8, a drugich - 13. Ciekawe co reprezentują dane identyfikatory... *Google Chrome* i *Safari* oraz *Windows* i *Linux*? A może jakiś telefoniczny *Android* i *Apple*? Kto to wie. # Zwizualizujmy nasze zmienne numeryczne. # # Najpierw - te odpowiadające za obecność na innych stronach. other_pages_columns = df.columns[0:6] other_pages_columns # + f, axes = plt.subplots(2, 3, figsize=(13, 8)) for i in range(0, len(other_pages_columns)): sns.distplot(df[other_pages_columns[i]].dropna(),color="purple", hist=True, kde=True, kde_kws={'shade': True, 'linewidth': 3}, ax=axes[i//3, i%3]).set_title('Rozkład zmiennej '+ other_pages_columns[i]) plt.tight_layout(); # - # Jak widać mimo małego zakresu absolutnej większości obserwacji (głównie pojawia się wartość "0"), outliery mocno odbiegające od standardowych wartości robią swoje. Trzeba je mieć na uwadze. # # Jak prezentują się miary udostępnione przez *Google Analytics*? rates = df.columns[6:9] rates # + f, axes = plt.subplots(1, 3, figsize=(13, 4)) for i in range(0, len(rates)): sns.distplot(df[rates[i]].dropna(),color="purple", hist=True, kde=True, kde_kws={'shade': True, 'linewidth': 3}, ax=axes[i%3]).set_title('Rozkład zmiennej '+ rates[i]) plt.tight_layout(); # - # Rzeczywiście oba wskaźniki oscylują wokół paru procent, jednak potrafią dojść nawet do 20%. # # Ciekawą kolumną jest *PageValues* - tu niemalże zawsze mamy wartość kilka, lecz skrajne witryny osiągają nawet 250. # # Zobaczmy jeszcze wartości dla *SpecialDay*. 
print("Zerowych wartości \"Special Day\": ",round(sum(df['SpecialDay'] == 0) / len(df) * 100, 2), "%", sep = "") sns.countplot(data=df, x="SpecialDay", palette="PuRd").set_title('Rozkład zmiennej SpecialDay') plt.tight_layout(); # Absolutna większość dni zdecydowania nie jest "specjalna" - dlatego na szczególną uwagę zasługują te o wartości niezerowej. # ### Rozkłady zmiennych kategorycznych categorical_colnames = df.columns[df.dtypes == "object"] categorical_colnames # + f, axes=plt.subplots(2, 3, figsize=(13, 8)) for i in range(0, len(categorical_colnames)): sns.countplot(data=df, x=categorical_colnames[i], ax=axes[i//3, i%3], palette="PuRd").set_title('Rozkład zmiennej '+ categorical_colnames[i]) for ax in f.axes: ax.set_ylabel('') plt.tight_layout(); # - # Wnioski: # # 1. Nie ma równomiernego rozkładu miesięcy - nie mówiąc o tym, że nie mamy żadnych danych ze stycznia i kwietnia, przeważają chociażby maj i listopad (po ponad 3000 obserwacji, razem więcej niż 50% wszystkich), # 2. Zaiste przeważa drugi system operacyjny, ale jest także dużo 1. i 3. Drobna liczba systemów o identyfikatorach 5-8, # 3. Ludzie przeważnie używają 2. przeglądarki, trochę mniej 1., kilkaset osób 4. i 5. i bardzo malutko pozostałych, # 4. Przeważa region pierwszy (blisko 40% wszystkich danych), blisko 20% z regionu 3, inne podobnie, z najmniejszą kilku-procentową częstością regionu trzeciego, # 5. Mamy 20 typów ruchu po sieci, absolutna większość to pierwsze 3, a z posotałych głównie 4. i 13., # 6. Ponad 4/5 użytkowników powraca na serwis, a nie jest na nim po raz pierwszy - jest jeszcze mała liczba obserwacji *Other* - czy to swego rodzaju *NA*? (df[['VisitorType']] == 'Other').sum() # Jest ich dokładnie 85. # Teraz zobaczmy jak wyglądają rozkłady zmiennych binarnych. 
binary_colnames = df.columns[df.dtypes == "bool"] binary_colnames # + f, axes=plt.subplots(1, 2, figsize=(10, 5)) for i in range(0, len(binary_colnames)): sns.countplot(data=df, x=binary_colnames[i], ax=axes[i%2], palette="PuRd").set_title('Rozkład zmiennej '+ binary_colnames[i]) for ax in f.axes: ax.set_ylabel('') plt.tight_layout(); # - sum(df['Weekend'] == True)/2, sum(df['Weekend'] == False)/5 # Wnioski: # # 1. Znacznie więcej jest wejść w tygodniu niż w weekend, nawet jeśli patrzymy na średnią liczbę wejść na jeden dzień, # 2. Tylko mały procent osób odwiedzających stronę w końcu decyduje się na kupno. # # Obie zależności mogą wynikać z tego, że dużo ludzi przegląda strony internetowe w pracy bez zamiaru kupna, tylko żeby poszukać informacji. # ## Relacje między kolumnami # Na początku wyświetlmy macierz korelacji. corr = df.corr() corr # + g = sns.heatmap(corr, vmax=1, vmin=-1, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True, fmt='.2f', cmap='icefire') sns.despine() g.figure.set_size_inches(18,12) plt.show() # - # Bez zaskoczeń możemy zaobserwować dużą korelację między liczbą innych stron odwiedzonych przez użytkowników a czasem na nich spędzonych - w innym wypadku byłoby to podejrzane. # Możemy także zaobserwować dużą korelację między ilościami wejść na inne strony i czasie na nich spędzonych niezależnie od kategorii. Okazuje się także, że oba wskaźniki zaproponowane przez *Googla* są do siebie bardzo zbliżone. # # Mamy też potwierdzenie definicji *PageValues* - rzeczywiście owy wynik jest związany z informacją czy z tej strony nastąpiła transakcja. # ### Zależności między wybranymi zmiennymi # *ExitRates* oraz *BounceRates* # + sns.set(style="darkgrid") tips = sns.load_dataset("tips") g = sns.jointplot("BounceRates", "ExitRates", data=df, kind="reg", truncate=False, color="m",height=7) # - # Rzeczywiście widać tu silną zależność. 
# *ProductRelated* and *ProductRelated_Duration*

# Scatter plot with a regression fit: number of product-related pages visited
# vs. total time spent on them (both columns come from `df` loaded above).
# The unused `tips = sns.load_dataset("tips")` line — a copy-paste leftover
# from the seaborn gallery that also forced a network download — is removed,
# and x/y are passed as keywords (positional x/y are deprecated in seaborn).
g = sns.jointplot(
    x="ProductRelated",
    y="ProductRelated_Duration",
    data=df,
    kind="reg",
    truncate=False,
    color="r",
    height=7,
)
Projekty/Projekt2/Grupa2/KosternaMrozPodsiad/Projekt2_KM1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.png) # # Deploying a web service to Azure Kubernetes Service (AKS) # This notebook shows the steps for deploying a service: registering a model, provisioning a cluster with ssl (one time action), and deploying a service to it. # We then test and delete the service, image and model. from azureml.core import Workspace from azureml.core.compute import AksCompute, ComputeTarget from azureml.core.webservice import Webservice, AksWebservice from azureml.core.model import Model import azureml.core print(azureml.core.VERSION) # # Get workspace # Load existing workspace from the config file info. # + from azureml.core.workspace import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # # Register the model # Register an existing trained model, add descirption and tags. 
# + #Register the model from azureml.core.model import Model model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file model_name = "sklearn_model", # this is the name the model is registered as tags = {'area': "diabetes", 'type': "regression"}, description = "Ridge regression model to predict diabetes", workspace = ws) print(model.name, model.description, model.version) # - # # Create the Environment # Create an environment that the model will be deployed with # + from azureml.core import Environment from azureml.core.conda_dependencies import CondaDependencies conda_deps = CondaDependencies.create(conda_packages=['numpy', 'scikit-learn==0.19.1', 'scipy'], pip_packages=['azureml-defaults', 'inference-schema']) myenv = Environment(name='myenv') myenv.python.conda_dependencies = conda_deps # - # #### Use a custom Docker image # # You can also specify a custom Docker image to be used as base image if you don't want to use the default base image provided by Azure ML. Please make sure the custom Docker image has Ubuntu >= 16.04, Conda >= 4.5.\* and Python(3.5.\* or 3.6.\*). # # Only supported with `python` runtime. 
# ```python # # use an image available in public Container Registry without authentication # myenv.docker.base_image = "mcr.microsoft.com/azureml/o16n-sample-user-base/ubuntu-miniconda" # # # or, use an image available in a private Container Registry # myenv.docker.base_image = "myregistry.azurecr.io/mycustomimage:1.0" # myenv.docker.base_image_registry.address = "myregistry.azurecr.io" # myenv.docker.base_image_registry.username = "username" # myenv.docker.base_image_registry.password = "password" # ``` # # Write the Entry Script # Write the script that will be used to predict on your model # + # %%writefile score_ssl.py import os import pickle import json import numpy from sklearn.externals import joblib from sklearn.linear_model import Ridge from inference_schema.schema_decorators import input_schema, output_schema from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType def init(): global model # AZUREML_MODEL_DIR is an environment variable created during deployment. 
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION) # For multiple models, it points to the folder containing all deployed models (./azureml-models) model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl') # deserialize the model file back into a sklearn model model = joblib.load(model_path) standard_sample_input = {'a': 10, 'b': 9, 'c': 8, 'd': 7, 'e': 6, 'f': 5, 'g': 4, 'h': 3, 'i': 2, 'j': 1 } standard_sample_output = {'outcome': 1} @input_schema('param', StandardPythonParameterType(standard_sample_input)) @output_schema(StandardPythonParameterType(standard_sample_output)) def run(param): try: raw_data = [param['a'], param['b'], param['c'], param['d'], param['e'], param['f'], param['g'], param['h'], param['i'], param['j']] data = numpy.array([raw_data]) result = model.predict(data) return { 'outcome' : result[0] } except Exception as e: error = str(e) return error # - # # Create the InferenceConfig # Create the inference config that will be used when deploying the model # + from azureml.core.model import InferenceConfig inf_config = InferenceConfig(entry_script='score_ssl.py', environment=myenv) # - # # Provision the AKS Cluster with SSL # This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it. # # See code snippet below. 
Check the documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-secure-web-service) for more details # + # Use the default configuration (can also provide parameters to customize) provisioning_config = AksCompute.provisioning_configuration() # Leaf domain label generates a name using the formula # "<leaf-domain-label>######.<azure-region>.cloudapp.azure.net" # where "######" is a random series of characters provisioning_config.enable_ssl(leaf_domain_label = "contoso", overwrite_existing_domain = True) aks_name = 'my-aks-ssl-1' # Create the cluster aks_target = ComputeTarget.create(workspace = ws, name = aks_name, provisioning_configuration = provisioning_config) # - # %%time aks_target.wait_for_completion(show_output = True) print(aks_target.provisioning_state) print(aks_target.provisioning_errors) # # Deploy web service to AKS # + tags=["sample-deploy-to-aks"] # %%time aks_config = AksWebservice.deploy_configuration() aks_service_name ='aks-service-ssl-1' aks_service = Model.deploy(workspace=ws, name=aks_service_name, models=[model], inference_config=inf_config, deployment_config=aks_config, deployment_target=aks_target, overwrite=True) aks_service.wait_for_deployment(show_output = True) print(aks_service.state) # - # # Test the web service using run method # We test the web sevice by passing data. # Run() method retrieves API keys behind the scenes to make sure that call is authenticated. # + # %%time import json standard_sample_input = json.dumps({'param': {'a': 10, 'b': 9, 'c': 8, 'd': 7, 'e': 6, 'f': 5, 'g': 4, 'h': 3, 'i': 2, 'j': 1 }}) aks_service.run(input_data=standard_sample_input) # - # # Clean up # Delete the service, image and model. # %%time aks_service.delete() model.delete()
how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="M2JSq96wWrsR" import os import re import cv2 # opencv library import numpy as np from os.path import isfile, join import matplotlib.pyplot as plt # + id="Q8CdY8VbW-ra" # get file names of the frames col_frames = os.listdir('/content/drive/My Drive/Data/frames/') # sort file names col_frames.sort(key=lambda f: int(re.sub('\D', '', f))) # empty list to store the frames col_images=[] for i in col_frames: # read the frames img = cv2.imread('/content/drive/My Drive/Data/frames/'+i) # append the frames to the list col_images.append(img) # + id="LWHXp05IccpN" outputId="64c7d4d4-e885-4c92-89f5-b040862eed09" colab={"base_uri": "https://localhost:8080/", "height": 487} # plot 13th frame i = 13 for frame in [i, i+1]: plt.imshow(cv2.cvtColor(col_images[frame], cv2.COLOR_BGR2RGB)) plt.title("frame: "+str(frame)) plt.show() # + id="JDOOU_YGd-XQ" outputId="29dddc18-0631-4644-e8e6-15bb1d777ea3" colab={"base_uri": "https://localhost:8080/", "height": 240} # convert the frames to grayscale grayA = cv2.cvtColor(col_images[i], cv2.COLOR_BGR2GRAY) grayB = cv2.cvtColor(col_images[i+1], cv2.COLOR_BGR2GRAY) # plot the image after frame differencing plt.imshow(cv2.absdiff(grayB, grayA), cmap = 'gray') plt.show() # + id="R9tCvKb3eaQs" outputId="7ea99fbd-aca6-46bf-ca86-f69dc5ef0a6f" colab={"base_uri": "https://localhost:8080/", "height": 240} diff_image = cv2.absdiff(grayB, grayA) # perform image thresholding ret, thresh = cv2.threshold(diff_image, 30, 255, cv2.THRESH_BINARY) # plot image after thresholding plt.imshow(thresh, cmap = 'gray') plt.show() # + id="av1Cxq3Zeim9" outputId="1fe98c3a-1f83-419a-c0cc-857fdd717635" colab={"base_uri": "https://localhost:8080/", "height": 240} # apply image dilation kernel = np.ones((3,3),np.uint8) dilated = cv2.dilate(thresh,kernel,iterations = 1) 
# plot dilated image plt.imshow(dilated, cmap = 'gray') plt.show() # + id="0O-em1Epequt" outputId="6cc3ceaf-bdf3-4e31-9b35-d1fb2ed139d0" colab={"base_uri": "https://localhost:8080/", "height": 240} # plot vehicle detection zone plt.imshow(dilated) cv2.line(dilated, (0, 80),(256,80),(100, 0, 0)) plt.show() # + id="fzneBxx2e0qF" # find contours contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # + id="CL8nzswHe_uH" outputId="fd3bfe10-63fe-40b2-cf24-4f81bdbaabe3" colab={"base_uri": "https://localhost:8080/", "height": 34} valid_cntrs = [] for i,cntr in enumerate(contours): x,y,w,h = cv2.boundingRect(cntr) if (x <= 200) & (y >= 80) & (cv2.contourArea(cntr) >= 25): valid_cntrs.append(cntr) # count of discovered contours len(valid_cntrs) # + id="UauIeqybfuUu" outputId="222b3f66-e975-4dea-d44a-d6754e4e5142" colab={"base_uri": "https://localhost:8080/", "height": 240} dmy = col_images[13].copy() cv2.drawContours(dmy, valid_cntrs, -1, (127,200,0), 2) cv2.line(dmy, (0, 80),(256,80),(100, 255, 255)) plt.imshow(dmy) plt.show() # + id="731eBW6hfzwl" # kernel for image dilation kernel = np.ones((4,4),np.uint8) # font style font = cv2.FONT_HERSHEY_SIMPLEX # directory to save the ouput frames pathIn = ('/content/drive/My Drive/Data/frames/') for i in range(len(col_images)-1): # frame differencing grayA = cv2.cvtColor(col_images[i], cv2.COLOR_BGR2GRAY) grayB = cv2.cvtColor(col_images[i+1], cv2.COLOR_BGR2GRAY) diff_image = cv2.absdiff(grayB, grayA) # image thresholding ret, thresh = cv2.threshold(diff_image, 30, 255, cv2.THRESH_BINARY) # image dilation dilated = cv2.dilate(thresh,kernel,iterations = 1) # find contours contours, hierarchy = cv2.findContours(dilated.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # shortlist contours appearing in the detection zone valid_cntrs = [] for cntr in contours: x,y,w,h = cv2.boundingRect(cntr) if (x <= 200) & (y >= 80) & (cv2.contourArea(cntr) >= 25): if (y >= 90) & (cv2.contourArea(cntr) < 40): break 
valid_cntrs.append(cntr) # add contours to original frames dmy = col_images[i].copy() cv2.drawContours(dmy, valid_cntrs, -1, (127,200,0), 2) cv2.putText(dmy, "vehicles detected: " + str(len(valid_cntrs)), (55, 15), font, 0.6, (0, 180, 0), 2) cv2.line(dmy, (0, 80),(256,80),(100, 255, 255)) cv2.imwrite(pathIn+str(i)+'.png',dmy) # + id="QD_85WHEgKI9" # specify video name pathOut = 'vehicle_detection_v3.mp4' # specify frames per second fps = 14.0 # + id="3_8G_raWjJ3r" outputId="7a48af07-a4dd-46cb-963e-b3d2b2a1e003" colab={"base_uri": "https://localhost:8080/", "height": 1000} import os os.chdir("/content/drive/My Drive") # !ls # + id="ak-3SlIbgZAV" frame_array = [] files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))] # + id="kAb2mvvVi-ew" files.sort(key=lambda f: int(re.sub('\D', '', f))) for i in range(len(files)): filename=pathIn + files[i] #read frames img = cv2.imread(filename) height, width, layers = img.shape size = (width,height) #inserting the frames into an image array frame_array.append(img) # + id="RNGdZ0uCkGTR" out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'DIVX'), fps, size) for i in range(len(frame_array)): # writing to a image array out.write(frame_array[i]) out.release() # + id="enpCvfLb4hlW" ##https://www.analyticsvidhya.com/blog/2020/04/vehicle-detection-opencv-python/ # + id="Is3QkUjJQwFu" outputId="2cab5461-7dd9-4dda-f1f0-1fde3be01dbf" colab={"resources": {"http://localhost:8080/content/drive/My": {"data": "<KEY>", "ok": false, "headers": [["content-length", "1449"], ["content-type", "text/html; charset=utf-8"]], "status": 404, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 843} # !pip install -U kora from kora.drive import upload_public url = upload_public("/content/drive/My Drive/Data/vehicle_detection_v3.mp4") # then display it from IPython.display import HTML HTML(f"""<video src={"/content/drive/My Drive/Data/vehicle_detection_v3.mp4"} width=1000 controls/>""") # + id="_DUd5LI_TIPm"
Vehicles Counting with Frame Differencing/Vehicle_Detection_Using_Frame_differencing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.9 64-bit (''simulation-framework'': pipenv)' # language: python # name: python36964bitsimulationframeworkpipenvbd7f75cb29004d9a9f3a9e2c739901c8 # --- # ## Sample Module with Sample ModuleState # # This notebook shows how to create a basic module that can be easily tested with the simulation # + import attr from simulation.module import Module, ModuleState from simulation.state import State @attr.s(kw_only=True) class SampleState(ModuleState): variable_name: int = attr.ib(default=0) class Sample(Module): name = 'sample' defaults = { 'ex_int': '0', 'ex_float': '0.0', 'ex_bool': 'False', 'ex_string': '', } StateClass = SampleState def initialize(self, state: State): # how to get different values from config.ini file state.sample.ex_int = self.config.getint('ex_int') state.sample.ex_float = self.config.getfloat('ex_float') state.sample.ex_bool = self.config.getboolean('ex_bool') state.sample.ex_string = self.config.get('ex_string') # see simulation.module to see various default functions whose behaviour can be overridden # + # see run in simulation/cli.py from math import ceil from simulation.config import SimulationConfig from simulation.solver import advance, finalize, initialize # any config files in config_files should be hardcoded to be a full local file path # edit config file referencing geometry.hdf5 to be the full local file path config_files = [] config = SimulationConfig(*config_files) config.add_module(Sample) target_time = 5 total = ceil(target_time / config.getfloat('simulation', 'time_step')) attr.set_run_validators(config.getboolean('simulation', 'validate')) def get_time(x): if x is None: return '0' return '%.2f' % x.time state = initialize(State.create(config)) states = advance(state, target_time) for _state in states: pass state = finalize(state) # Note: all files 
# outputted will be in the same folder as this notebook, unless an explicit
# path is given as below. (This line continues the markdown note started in
# the previous cell; it was missing its '#' prefix, which made the cell a
# SyntaxError.)
state.save('../simulation-final.pkl')
notebooks/sample.py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

library(tidyverse)

df = read_csv("movies.csv")

genre <- 'Comedy'
year_info <- c(1900, 1980)

count_per_genre <- function(genre = 'Comedy', year_info = c(1950, 2010)) {
    # Creates a bar chart of the number of movies produced per genre
    # between two years, highlighting one chosen genre.
    #
    # --------
    # @param :
    #   genre     : genre to highlight
    #   year_info : list/vector with 2 values giving the start and end year
    #
    # --------
    # @Return :
    #   a ggplot bar chart of 'Genre' vs. 'Number of movies produced'
    #
    # --------
    # @Example :
    #   count_per_genre('Comedy', list(1990, 2010))
    #   count_per_genre('Drama', c(1990, 2010))
    # --------

    # Filter between the dates
    A <- df %>% filter(between(df$year, as.integer(year_info[1]), as.integer(year_info[2])))

    # Count number of movies per genre
    A <- A %>%
        drop_na(Major_Genre) %>%
        group_by(Major_Genre) %>%
        summarise(count_per_genre = n())

    # Flag used to highlight the chosen genre
    A <- A %>% mutate(to_highlight = ifelse(Major_Genre == genre, "Yes", "No"))

    # NOTE: labs() names the *aesthetics*, not the final screen axes, and
    # coord_flip() swaps the axes on screen. The x aesthetic is Major_Genre,
    # so it must be labelled 'Genre' (the previous labels were swapped).
    ggplot(A, aes(Major_Genre, count_per_genre, fill = to_highlight)) +
        geom_bar(stat = 'identity', position = 'dodge') +
        scale_fill_manual(values = c("Yes" = "orange", "No" = "Grey"), guide = FALSE) +
        labs(x = 'Genre', y = "Number of movies produced", title = 'Popularity of Genres') +
        coord_flip()
}

count_per_genre(genre, year_info)

# +
average_box_office <- function(genre = 'Comedy', year_info = list(1980, 2010)) {
    # Creates a stacked bar chart of the average US vs. international
    # box-office gross per year, between two years.
    # (`genre` is accepted for a uniform interface with the other plot
    # functions but is not used by this plot.)
    #
    # --------
    # @param :
    #   genre     : unused, kept for interface consistency
    #   year_info : list/vector with 2 values giving the start and end year
    #
    # --------
    # @Return :
    #   a ggplot bar chart of 'Year' vs. average box-office 'Dollars'
    #
    # --------
    # @Example :
    #   average_box_office('Comedy', list(1990, 2010))
    #   average_box_office('Drama', c(1990, 2010))
    # --------

    # Filter between the dates
    B <- df %>% filter(between(df$year, as.integer(year_info[1]), as.integer(year_info[2])))

    # Derive international gross, then gather into tidy (long) format so the
    # US and international components stack into one bar per year
    B <- B %>%
        drop_na(Major_Genre) %>%
        mutate(International_Gross = Worldwide_Gross - US_Gross) %>%
        select(Major_Genre, year, US_Gross, International_Gross) %>%
        gather(key = 'Gross', value = 'amount', -Major_Genre, -year) %>%
        group_by(year, Gross) %>%
        summarise(amount = mean(amount))

    B_plot <- ggplot(B, aes(year, amount, fill = Gross)) +
        geom_bar(stat = 'identity') +
        labs(x = "Year", y = 'Dollars', title = 'Average box office')
    B_plot
}

average_box_office()

# +
heatmap <- function(genre = 'Comedy', year_info = list(1950, 2010)) {
    # Creates a scatter plot of profit ratio vs. IMDB rating for movies
    # released between two years.
    # NOTE(review): despite its name this function draws a scatter plot,
    # not a heatmap; the name is kept for backward compatibility.
    # (`genre` is accepted for a uniform interface but is not used.)
    #
    # --------
    # @param :
    #   genre     : unused, kept for interface consistency
    #   year_info : list/vector with 2 values giving the start and end year
    #
    # --------
    # @Return :
    #   a ggplot scatter plot of 'profit ratio' vs. 'IMDB rating'
    #
    # --------
    # @Example :
    #   heatmap('Comedy', list(1990, 2010))
    #   heatmap('Drama', c(1990, 2010))
    # --------

    # Filter between the dates
    C <- df %>% filter(between(df$year, as.integer(year_info[1]), as.integer(year_info[2])))

    # assumes movies.csv already provides a `profit_ratio` column — TODO
    # confirm with the data source; extreme ratios (> 20) are dropped as outliers
    C <- C %>%
        drop_na(Major_Genre) %>%
        subset(profit_ratio <= 20)

    C_plot <- ggplot(C, aes(profit_ratio, IMDB_Rating, fill = profit_ratio)) +
        geom_point() +
        #scale_fill_gradient(low = "white", high = "steelblue") +
        labs(x = "profit ratio", y = 'IMDB rating', title = 'Profit ratio vs. IMDB rating')
    C_plot
}

heatmap()
# -
.ipynb_checkpoints/data wrangling-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd history_location = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Classification-2/master/data/lending-club-subset.csv' current_location = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Classification-2/master/data/primaryMarketNotes_browseNotes_1-RETAIL.csv' history = pd.read_csv(history_location) history['issue_d'] = pd.to_datetime(history['issue_d'], infer_datetime_format=True) current = pd.read_csv(current_location) # + history['earliest_cr_line'] = pd.to_datetime(history['earliest_cr_line'], infer_datetime_format=True) history['earliest_cr_line'] = history['issue_d'] - history['earliest_cr_line'] history['earliest_cr_line'] = history['earliest_cr_line'].dt.days current['earliest_cr_line'] = pd.to_datetime(current['earliest_cr_line'], infer_datetime_format=True) current['earliest_cr_line'] = pd.Timestamp.today() - current['earliest_cr_line'] current['earliest_cr_line'] = current['earliest_cr_line'].dt.days history['sec_app_earliest_cr_line'] = pd.to_datetime(history['sec_app_earliest_cr_line'], infer_datetime_format=True, errors='coerce') history['sec_app_earliest_cr_line'] = history['issue_d'] - history['sec_app_earliest_cr_line'] history['sec_app_earliest_cr_line'] = history['sec_app_earliest_cr_line'].dt.days current['sec_app_earliest_cr_line'] = pd.to_datetime(current['sec_app_earliest_cr_line'], infer_datetime_format=True, errors='coerce') current['sec_app_earliest_cr_line'] = pd.Timestamp.today() - current['sec_app_earliest_cr_line'] current['sec_app_earliest_cr_line'] = current['sec_app_earliest_cr_line'].dt.days history['issue_d_year'] = history['issue_d'].dt.year history['issue_d_month'] = history['issue_d'].dt.month current['issue_d_year'] = pd.Timestamp.today().year 
current['issue_d_month'] = pd.Timestamp.today().month # - history['percent_paid'] = history['total_pymnt'] / history['funded_amnt'] target = 'loan_status' X = history.drop(columns=target) y = history[target] # + from sklearn.model_selection import train_test_split X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, test_size=20000, stratify=y) X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, test_size=20000, stratify=y_trainval) X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape # + cols = ['id', 'issue_d', 'grade', 'percent_paid', 'term', 'int_rate'] result_train = X_train[cols].copy() result_val = X_val[cols].copy() result_test = X_test[cols].copy() result_val.head() # - common_columns = set(history.columns) & set(current.columns) just_history = set(history.columns) - set(current.columns) just_current = set(current.columns) - set(history.columns) features = list(common_columns) X_train = X_train[features] X_val = X_val[features] X_test = X_test[features] # Looking to find new features pd.options.display.max_columns = 200 X_train.head() X_train['emp_title'].value_counts() def wrangle(df): df = df.copy() for col in df: df[col+'_NULL'] = df[col].isnull() df['int_rate'] = df['int_rate'].str.strip('%').astype(float) df['revol_util'] = df['revol_util'].str.strip('%').astype(float) df['emp_length'] = df['emp_length'].str.replace(r'\D','').astype(float) df['emp_title'] = df['emp_title'].str.lower() df['emp_title_manager'] = df['emp_title'].str.contains('manager', na=False) df['emp_title_teacher'] = df['emp_title'].str.contains('teacher', na=False) df['emp_title_owner'] = df['emp_title'].str.contains('owner', na=False) df['title'] = df['title'].str.len() df['desc'] = df['desc'].str.len() df['emp_title'] = df['emp_title'].str.len() sub_grade_ranks = {'A1': 1, 'A2': 2, 'A3': 3, 'A4': 4, 'A5': 5, 'B1': 6, 'B2': 7, 'B3': 8, 'B4': 9, 'B5': 10, 'C1': 11, 'C2': 12, 'C3': 13, 'C4': 14, 'C5': 15, 'D1': 16, 'D2': 
17, 'D3': 18, 'D4': 19, 'D5': 20} df['sub_grade'] = df['sub_grade'].map(sub_grade_ranks) df = df.drop(columns='id') df = df.drop(columns='url') df = df.drop(columns='member_id') df = df.drop(columns='grade') df = df.drop(columns='zip_code') features = ['acc_open_past_24mths', 'addr_state', 'all_util', 'annual_inc', 'annual_inc_joint', 'avg_cur_bal', 'bc_open_to_buy', 'bc_util', 'collections_12_mths_ex_med', 'delinq_amnt', 'desc_NULL', 'dti', 'dti_joint', 'earliest_cr_line', 'emp_length', 'emp_length_NULL', 'emp_title', 'emp_title_NULL', 'emp_title_owner', 'fico_range_high', 'funded_amnt', 'home_ownership', 'inq_last_12m', 'inq_last_6mths', 'installment', 'int_rate', 'issue_d_month', 'issue_d_year', 'loan_amnt', 'max_bal_bc', 'mo_sin_old_il_acct', 'mo_sin_old_rev_tl_op', 'mo_sin_rcnt_rev_tl_op', 'mort_acc', 'mths_since_last_major_derog_NULL', 'mths_since_last_record', 'mths_since_recent_bc', 'mths_since_recent_inq', 'num_actv_bc_tl', 'num_actv_rev_tl', 'num_op_rev_tl', 'num_rev_tl_bal_gt_0', 'num_tl_120dpd_2m_NULL', 'open_rv_12m_NULL', 'open_rv_24m', 'pct_tl_nvr_dlq', 'percent_bc_gt_75', 'pub_rec_bankruptcies', 'purpose', 'revol_bal', 'revol_bal_joint', 'sec_app_earliest_cr_line', 'sec_app_fico_range_high', 'sec_app_open_acc', 'sec_app_open_act_il', 'sub_grade', 'term', 'title', 'title_NULL', 'tot_coll_amt', 'tot_hi_cred_lim', 'total_acc', 'total_bal_il', 'total_bc_limit', 'total_cu_tl', 'total_rev_hi_lim'] df = df[features] return df # + X_train = wrangle(X_train) X_val = wrangle(X_val) X_test = wrangle(X_test) X_train.shape, X_val.shape, X_test.shape # + import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline from xgboost import XGBClassifier processor = make_pipeline(ce.OrdinalEncoder(), SimpleImputer(strategy='median')) X_train_processed = processor.fit_transform(X_train) X_val_processed = processor.transform(X_val) eval_set = [(X_train_processed, y_train), (X_val_processed, y_val)] model = 
XGBClassifier(n_estimators=2000, n_jobs=-1) model.fit(X_train_processed, y_train, eval_set=eval_set, eval_metric='auc', early_stopping_rounds=15) # - # %matplotlib inline import matplotlib.pyplot as plt from sklearn.calibration import calibration_curve y_pred_proba = model.predict_proba(X_val_processed)[:, 1] prob_true, prob_pred = calibration_curve(y_val, y_pred_proba, n_bins=8) plt.plot((0,1), (0,1), linestyle='--', color='grey') plt.plot(prob_pred, prob_true); # + def get_results(result_df, y_true, y_pred_proba): # from copy import copy result_df = result_df.copy() result_df['loan_status'] = y_true result_df['pred_proba'] = y_pred_proba result_df['int_rate'] = result_df['int_rate'].str.strip('%').astype(float) result_df['term'] = result_df['term'].str.replace(r'\D', '').astype(int) result_df['max_interest'] = result_df['int_rate'] * result_df['term'] / 12 result_df['best_case'] = 25 + result_df['max_interest']/100 * 25 result_df['worst_case'] = -25 result_df['expected_value'] = (result_df['pred_proba'] * result_df['best_case'] + (1-result_df['pred_proba']) * result_df['worst_case']) return result_df result_val = get_results(result_val, y_val, y_pred_proba) # - result_val.head() result_val.describe() # + # %matplotlib inline from IPython.display import display import matplotlib.pyplot as plt from scipy.stats import percentileofscore import seaborn as sns from tqdm import tnrange def simulate(df, n_picks=40, n_sims=10000, grades=['A', 'B', 'C', 'D'], start_date='2007-07-01', end_date='2019-03-01', min_expected_value=-25): condition = ((df['grade'].isin(grades)) & (df['issue_d'] >= start_date) & (df['issue_d'] <= end_date) & (df['expected_value'] >= min_expected_value)) possible = df[condition] simulations = [] for _ in tnrange(n_sims): picks = possible.sample(n_picks).copy() picks['paid'] = 25 * picks['percent_paid'] paid = picks['paid'].sum() simulations.append(paid) simulations = pd.Series(simulations) sns.distplot(simulations) plt.axvline(x=1000) percent = 
percentileofscore(simulations, 1000) print(simulations.describe().to_string()) plt.title(f'{percent}% of simulations did not profit') # - simulate(result_val) simulate(result_val, grades=['B']) simulate(result_val, grades=['B']) simulate(result_val, grades=['C']) # + # Doing Shaply import numpy as np result_test = get_results(result_test, y_test, y_pred_proba) min_expected_value = np.percentile(result_test['expected_value'], 80) simulate(result_test, min_expected_value=min_expected_value) # - df = result_test.copy() condition = (df['expected_value'] >= min_expected_value) possible = df[condition] picks = possible.sample(40).copy() picks data_for_prediction = X_test[X_test.index==14429] data_for_prediction data_for_prediction_processed = processor.transform(data_for_prediction) data_for_prediction_processed = pd.DataFrame(data_for_prediction_processed) data_for_prediction_processed.columns = data_for_prediction.columns import shap shap.initjs() explainer = shap.TreeExplainer(model) shap_values = explainer.shap_values(data_for_prediction_processed) shap.force_plot(explainer.expected_value, shap_values, data_for_prediction_processed) plt.figure(figsize=(10,30)) pd.Series(shap_values[0], X_test.columns).sort_values().plot.barh(color='blue')
module3-making-decisions/4.4.3 Assignment Mastin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # trying heading 1

# # trying heading 1

# ## trying heading 2

#
# - fgfgsfd
# - fdfgdf
# - ffdghfd

# ![](https://ichef.bbci.co.uk/news/976/cpsprodpb/12A9B/production/_111434467_gettyimages-1143489763.jpg)

# # Adding some more content

#
Jupyter-sept2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # TASK /: IMPORT ANY LIBRARIES YOU THINK YOU WILL NEED TO SCRAP A WEBSITE import requests import bs4 # + # TASK : USE REQUESTS LIBRARY AND BEAUTIFULSOUP TO CONNECT TO http://quotes.toscrape.com/ AND GET THE HTML TEXT FROM THE HOMEPAGE res = requests.get('http://quotes.toscrape.com') # - soup=bs4.BeautifulSoup(res.text,'lxml') soup soup.select('.author') authors=set() for name in soup.select('.author'): authors.add(name.text) authors # + # TASK TO CREATEA LIST OF ALL QUOTES ON THE FIRST PAGE # - soup.select('.text') quotes=[] for quote in soup.select('.text'): quotes.append(quote.text) quotes # TASK : INSPECT THE SITE AND USE BEAUTIFUL SOUP TO EXTRACT THE TOP TEN TAGS FROM THE REQUESTS TEXT SHOWN ON THE TOP RIGHT FROM # THE HOME PAGE (E.G LOVE, INSPIRATIONL, LIFE ETC) soup.select('.tag-item') for item in soup.select('.tag-item'): print(item.text) # + # TASK : NOTICE HW THERE IS MORE THAN ONE PAGE , AND SUBSEQUENT PAGES LOOK LIKE THIS # - url='http://quotes.toscrape.com/page/{}/' authors=set() # + for page_number in range(1,10): page_url=url.format(page_number) result = requests.get(page_url) soup_page=bs4.BeautifulSoup(result.text,'lxml') for name in soup_page.select('.author'): authors.add(name.text) # - authors # + # TASK : IF YOU DO NOT KNOW THE LAST NUMBER OF PAGE THEN WHAT WE CAN DO authors_name=set() for page_number in range(1,20): page_url=url.format(page_number) result = requests.get(page_url) soup_page=bs4.BeautifulSoup(result.text,'lxml') if not "No quotes found!" in result.text: for name in soup_page.select('.author'): authors_name.add(name.text) else: break # - authors_name "No quotes found!" in result.text # + page_still_valid = True authors_name=set() page=1 while page_still_valid: page_url = url.format(page_number)
webscrapping/webscrap_exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # VoronoiSite example
#
# Assign Na atoms in an MD trajectory (file names suggest a Na–Sn phase) to
# crystallographic sites via Voronoi decomposition, then compute per-site-type
# occupation statistics.

# +
from pymatgen.io.vasp import Poscar, Xdatcar
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.core import Lattice, Structure  # NOTE(review): Lattice is imported but unused here

import numpy as np

from site_analysis.atom import Atom
from site_analysis.trajectory import Trajectory
from site_analysis.voronoi_site import VoronoiSite
from site_analysis.tools import get_vertex_indices  # NOTE(review): unused in this notebook

from collections import Counter
import tqdm
# -

# Load a `POSCAR` file where every octahedral site is occupied by a Na atom.

all_na_structure = Poscar.from_file('na_sn_all_na_ext.POSCAR.vasp').structure

# Species used as site vertices / site centres in this analysis.
vertex_species = 'S'
centre_species = 'Na'

# Create a series of pymatgen Structures using the `Structure.from_spacegroup()` method, that each only contain the NaX sites, using the coordinates from Ramos _et al._ _Chem. Mater._ 2018.

sg = SpaceGroup('I41/acd:2')  # NOTE(review): `sg` is never reused; each call below passes the symbol string directly
lattice = all_na_structure.lattice
# One Structure per distinct Na site type (Na1..Na6) plus the i2 interstitial
# position; from_spacegroup expands each Wyckoff coordinate to all
# symmetry-equivalent positions.
na1 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.25, 0.0, 0.125]])
na2 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.00, 0.0, 0.125]])
na3 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.0, 0.25, 0.0]])
na4 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.0, 0.0, 0.0]])
na5 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.75, 0.25, 0.0]])
na6 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.5, 0.75, 0.625]])
i2 = Structure.from_spacegroup(sg='I41/acd:2', lattice=lattice, species=['Na'], coords=[[0.666, 0.1376, 0.05]])

na_structures = {'Na1': na1, 'Na2': na2, 'Na3': na3, 'Na4': na4, 'Na5': na5, 'Na6': na6, 'i2': i2}

# One VoronoiSite per symmetry-equivalent position, labelled by its site type.
na1_sites = [VoronoiSite(s.frac_coords, label='Na1') for s in na1]
na2_sites = [VoronoiSite(s.frac_coords, label='Na2') for s in na2]
na3_sites = [VoronoiSite(s.frac_coords, label='Na3') for s in na3]
na4_sites = [VoronoiSite(s.frac_coords, label='Na4') for s in na4]
na5_sites = [VoronoiSite(s.frac_coords, label='Na5') for s in na5]
na6_sites = [VoronoiSite(s.frac_coords, label='Na6') for s in na6]
i2_sites = [VoronoiSite(s.frac_coords, label='i2') for s in i2]
sites = na1_sites + na2_sites + na3_sites + na4_sites + na5_sites + na6_sites + i2_sites

structure = Poscar.from_file('POSCAR').structure

# create Polyhedron objects
# create Atom objects
# Note: index is the atom index in the pymatgen structures we are going to process.
# In this example, Na are the first species, so we count from 0.

atoms = [Atom(index=i, species_string=centre_species) for i, site in enumerate(structure) if site.species_string == 'Na']

trajectory = Trajectory(sites, atoms)

print(trajectory.site_labels())

trajectory.append_timestep(structure)

# The occupations of each site are stored as a list of lists, as each site can have zero, one, or multiple atoms occupying it.

print(trajectory.sites_trajectory[0])

# Rough example for collecting only occupied sites, and counting their site types:

# Per label: count site-timesteps that contain at least one atom.
c = Counter()
for site in trajectory.sites:
    c[site.label] += len([ 1 for ts in site.trajectory if len(ts)>0 ])
c

# vs. all sites:

c_sites = Counter(trajectory.site_labels())
c_sites

# +
# Re-run the site assignment over a full XDATCAR trajectory.
trajectory.reset()
xdatcar = Xdatcar('XDATCAR_Sn')
trajectory.trajectory_from_structures( xdatcar.structures, progress='notebook')
# -

n_timesteps = len(trajectory.timesteps)

c_sites = Counter(trajectory.site_labels())

# Fractional occupancy per site type:
# p_occ[label] = occupied site-timesteps / (sites with that label * timesteps).
c = Counter()
p_occ = {}
for site in trajectory.sites:
    c[site.label] += len([ 1 for ts in site.trajectory if len(ts)>0 ])
for k, v in c.items():
    p_occ[k] = v / c_sites[k] / n_timesteps
p_occ

# check total average occupation = 88 atoms

for k,v in c.items():
    print( k, p_occ[k]*c_sites[k])
print( sum( [ p_occ[k] * c_sites[k] for k, v in c.items()]))
example/Shortest distance site example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: .iga-python
#     language: python
#     name: .iga-python
# ---

# # 1. 2D Linear Convection
#
# We consider the 2d linear Convection equation, under a constant velocity
#
# $$
# \partial_t u + \mathbf{a} \cdot \nabla u = 0
# $$

# needed imports
from numpy import zeros, ones, linspace, zeros_like
from matplotlib.pyplot import plot, contourf, show, colorbar
# %matplotlib inline

# +
# Initial condition: a narrow Gaussian bump centred at (0.3, 0.3).
import numpy as np
u0 = lambda x,y: np.exp(-(x-.3)**2/.05**2)*np.exp(-(y-.3)**2/.05**2)

ts = linspace(0., 1., 401)
x,y = np.meshgrid(ts,ts)
u = u0(x,y)
# -

contourf(x,y, u); colorbar() ; show()

# ### Time scheme
#
# $$\frac{u^{n+1}-u^n}{\Delta t} + \mathbf{a} \cdot \nabla u^{n+1} = 0 $$
#
# $$ \left(I + \Delta t \mathbf{a} \cdot \nabla \right) u^{n+1} = u^n $$
#
# ### Weak formulation
#
# $$
# \langle v, u^{n+1} \rangle + \Delta t ~ \langle v, \mathbf{a} \cdot \nabla u^{n+1} \rangle = \langle v, u^n \rangle
# $$
#
# if we assume $\mathbf{a} = \left( a_1, a_2 \right)^T$ is a constant, then our weak formulation writes
# $$
# \langle v, u^{n+1} \rangle - \Delta t ~ \langle \mathbf{a} \cdot \nabla v , u^{n+1} \rangle = \langle v, u^n \rangle
# $$
#
# expanding $u^n$ over the fem basis, we get the linear system
#
# $$A U^{n+1} = M U^n$$
#
# where
#
# $$
# M_{ij} = \langle b_i, b_j \rangle
# $$
#
# $$
# A_{ij} = \langle b_i, b_j \rangle - \Delta t ~ \langle \mathbf{a} \cdot \nabla b_i, b_j \rangle
# $$
#
# ## Abstract Model using SymPDE

from sympde.core import Constant
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.topology import ScalarFunctionSpace, Square, element_of
from sympde.calculus import grad, dot

from sympy import Tuple

# +
# ... abstract model
domain = Square()
V = ScalarFunctionSpace('V', domain)
x,y = domain.coordinates
u,v = [element_of(V, name=i) for i in ['u', 'v']]

# Symbolic constants: advection components and time step, bound to numeric
# values later at assembly time.
a1 = Constant('a1')
a2 = Constant('a2')
dt = Constant('dt')
a = Tuple(a1,a2)

# bilinear form
# NOTE(review): this is the non-integrated-by-parts form
# <v,u> + dt*<v, a.grad(u)>; the A_ij formula in the markdown above shows the
# integrated-by-parts variant (minus sign) — the two differ by a boundary term.
# NOTE(review): `a` (the advection vector) is reassigned to the BilinearForm on
# the next line after being used in `expr`; consider distinct names.
expr = v*u + dt* dot(a, grad(u))*v
a = BilinearForm((u,v), integral(domain , expr))

# bilinear form for the mass matrix
expr = u*v
m = BilinearForm((u,v), integral(domain , expr))

# linear form for initial condition
from sympy import exp
expr = exp(-(x-.3)**2/.05**2)*exp(-(y-.3)**2/.05**2)*v
l = LinearForm(v, integral(domain, expr))
# -

# ## Discretization using Psydac

from psydac.api.discretization import discretize

# +
a1 = 1. ; a2 = 0. # wavespeed
T = 0.25  # T final time
dt = 0.001
niter = int(T / dt)

degree = [3,3]    # spline degree
ncells = [64,64]  # number of elements

# +
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=ncells, comm=None)

# Discrete spaces
Vh = discretize(V, domain_h, degree=degree)

# Discretize the bilinear forms
ah = discretize(a, domain_h, [Vh, Vh])
mh = discretize(m, domain_h, [Vh, Vh])

# Discretize the linear form for the initial condition
lh = discretize(l, domain_h, Vh)

# +
# assemble matrices and convert them to scipy
M = mh.assemble().tosparse()
A = ah.assemble(a1=a1, a2=a2, dt=dt).tosparse()

# assemble the rhs and convert it to numpy array
rhs = lh.assemble().toarray()
# -

from scipy.sparse.linalg import gmres

# L2 projection of the initial condition
un, status = gmres(M, rhs, tol=1.e-8, maxiter=5000)

# +
from utilities.plot import plot_field_2d

nbasis = [W.nbasis for W in Vh.spaces]
plot_field_2d(Vh.knots, Vh.degree, un.reshape(nbasis)) ; colorbar() ; show()
# -

# Implicit time stepping: solve A u^{n+1} = M u^n each step.
for i in range(0, niter):
    b = M.dot(un)
    un, status = gmres(A, b, tol=1.e-8, maxiter=5000)

nbasis = [W.nbasis for W in Vh.spaces]
plot_field_2d(Vh.knots, Vh.degree, un.reshape(nbasis)) ; colorbar() ; show()
lessons/Chapter2/04_convection_2d.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + import igraph from igraph import Graph, EdgeSeq import plotly.graph_objects as go name=[] fossils=[] locomotion=[] cranial=[] startDate=[] endDate=[] ecology=[] # + #Set up with igraph nr_vertices = 9 v_label = list(map(str, range(nr_vertices))) G = Graph.Tree(nr_vertices, 2) # 2 stands for children number lay = G.layout('rt') position = {k: lay[k] for k in range(nr_vertices)} Y = [lay[k][1] for k in range(nr_vertices)] M = max(Y) es = EdgeSeq(G) # sequence of edges E = [e.tuple for e in G.es] # list of edges L = len(position) Xn = [position[k][0] for k in range(L)] Yn = [2*M-position[k][1] for k in range(L)] Xe = [] Ye = [] for edge in E: Xe+=[position[edge[0]][0],position[edge[1]][0], None] Ye+=[2*M-position[edge[0]][1],2*M-position[edge[1]][1], None] labels = v_label Xe = [0,1,2,3] Ye = [0,1,2,3] # + #Add traces fig = go.Figure() fig.add_trace(go.Scatter(x=Xe, y=Ye, mode='lines', line=dict(color='rgb(210,210,210)', width=1), hoverinfo='none' )) fig.add_trace(go.Scatter(x=Xn, y=Yn, mode='markers', name='bla', marker=dict(symbol='circle-dot', size=18, color='#6175c1', #'#DB4551', line=dict(color='rgb(50,50,50)', width=1) ), text=labels, hoverinfo='text', opacity=0.8 )) # -
phylotree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (geog)
#     language: python
#     name: geog
# ---

# # LKA urbanization review
# http://wiki.worldbank.org/display/GEOS/LKA_SSAU1_ColomboUrbanization
#
# Will perform the following analyses:
# 1. Extract urban areas
# 2. Summarize nighttime lights
# 3. Summarize built area change
# 4. Run Market access
#    a. Airports
#    b. Ports
#    c. Schools
#    d. Hospitals

# +
import sys, os, importlib
import rasterio, boto3
import reverse_geocode
import geopandas as gpd
import pandas as pd
import skimage.graph as graph

# Import GOST libraries; sys.path.append will be unnecessary if libraries are already installed
sys.path.append("../../../../gostrocks/src")
sys.path.append("../../../../GOST_Urban")
sys.path.append("../../../../GOSTNets_Raster/src")

import GOSTNets_Raster.market_access as ma
import GOSTRocks.rasterMisc as rMisc
import src.UrbanRaster as urban
from GOSTRocks.misc import tPrint

# %matplotlib inline

# +
iso3 = 'LKA'
out_folder = "/home/wb411133/temp/%s" % iso3
if not os.path.exists(out_folder):
    os.makedirs(out_folder)

global_friction = "/home/public/Data/GLOBAL/INFRA/FRICTION_2020/2020_motorized_friction_surface.geotiff"

# Define local files
friction_file = os.path.join(out_folder, "friction_surface.tif")
focal_admin2 = os.path.join(out_folder, "admin.shp")
wp_1km = os.path.join(out_folder, "WP_2020_1km.tif")
urban_extents = os.path.join(out_folder, "urban_extents.shp")
hd_urban_extents = os.path.join(out_folder, "hd_urban_extents.shp")
airports = os.path.join(out_folder, "airports.shp")
ports = os.path.join(out_folder, "ports.shp")

# Clip the global friction surface to the focal admin boundary (run once).
if not os.path.exists(friction_file):
    rMisc.clipRaster(rasterio.open(global_friction), gpd.read_file(focal_admin2), friction_file)

# Calculate urban extents from 1km WorldPop
# NOTE(review): `wp` is not defined or imported anywhere in this notebook, so
# this branch raises NameError if `urban_extents` does not already exist.
# Likely meant to mirror the HD branch below
# (urban.urbanGriddedPop(wp_1km).calculateUrban(...)) — confirm before fixing.
if not os.path.exists(urban_extents):
    urban_shp = wp.calculateUrban(rasterio.open(wp_1km), 
                                  smooth=False)
    urban_shp.to_file(urban_extents)

# Calculate urban extents from 1km WorldPop
# NOTE(review): this branch also rewrites `urban_extents` (the non-HD file)
# with densVal=300 extents, silently overwriting whatever the branch above
# produced — confirm this double-write is intended.
if not os.path.exists(hd_urban_extents):
    urban_r = urban.urbanGriddedPop(wp_1km)
    urban_ext = urban_r.calculateUrban(densVal=300, totalPopThresh=5000, smooth=False, queen=False)
    urban_ext.to_file(urban_extents)
    hd_urban_ext = urban_r.calculateUrban(densVal=1500, totalPopThresh=50000, smooth=True, queen=True)
    hd_urban_ext.to_file(hd_urban_extents)

# +
#Summarize nighttime lights
bucket = "wbgdecinternal-ntl"
s3 = boto3.client('s3')
viirs_annual = s3.list_objects(Bucket=bucket, Prefix = 'NTL/VIIRS/Annual/VIIRS_ANNUAL_EOG/')
viirs_files = [os.path.join(f's3://{bucket}/', x['Key']) for x in viirs_annual['Contents']]

urb_ext = gpd.read_file(urban_extents)
hd_urb_ext = gpd.read_file(hd_urban_extents)
inAdm = gpd.read_file(focal_admin2)

# For every annual VIIRS layer, attach the zonal SUM as column v_<year>.
for viirs_file in viirs_files:
    yr = viirs_file.split("_")[-4]  # year token position in the VIIRS file name
    curR = rasterio.open(viirs_file)
    # Summarize VIIRS in urban extents
    res = rMisc.zonalStats(urb_ext, curR, minVal=0.1, allTouched=True)
    res = pd.DataFrame(res, columns=['SUM','MIN','MAX','MEAN'])
    urb_ext[f'v_{yr}'] = res['SUM']
    # Summarize VIIRS in hd urban extents
    res = rMisc.zonalStats(hd_urb_ext, curR, minVal=0.1, allTouched=True)
    res = pd.DataFrame(res, columns=['SUM','MIN','MAX','MEAN'])
    hd_urb_ext[f'v_{yr}'] = res['SUM']
    # Summarize VIIRS in admin areas
    res = rMisc.zonalStats(inAdm, curR, minVal=0.1, allTouched=False)
    res = pd.DataFrame(res, columns=['SUM','MIN','MAX','MEAN'])
    inAdm[f'v_{yr}'] = res['SUM']
# -

pd.DataFrame(urb_ext).drop(['geometry'], axis=1).to_csv(os.path.join(out_folder, "urban_viirs.csv"))
pd.DataFrame(hd_urb_ext).drop(['geometry'], axis=1).to_csv(os.path.join(out_folder, "hd_urban_viirs.csv"))
pd.DataFrame(inAdm).drop(['geometry'], axis=1).to_csv(os.path.join(out_folder, "adm_viirs.csv"))

# +
# summarize built area change
global_ghsl = "/home/public/Data/GLOBAL/GHSL/ghsl.vrt"
inR = rasterio.open(global_ghsl)
# Reproject the vector layers to the GHSL raster CRS before zonal stats.
if urb_ext.crs != inR.crs:
    urb_ext = urb_ext.to_crs(inR.crs)
if hd_urb_ext.crs != inR.crs:
    hd_urb_ext = hd_urb_ext.to_crs(inR.crs)
if inAdm.crs != inR.crs:
    inAdm = inAdm.to_crs(inR.crs)

# Categorical (rastType='C') pixel counts per GHSL class code 0..6.
ex_cols = [0,1,2,3,4,5,6]
col_names=['cNA','H20','notBuilt','b2014','b2000','b1990','b1975']
# Summarize GHSL in urban extents
urb_res = rMisc.zonalStats(urb_ext, inR, rastType='C', unqVals=ex_cols)
urb_res = pd.DataFrame(urb_res, columns=col_names)
urb_res['ID'] = urb_ext['ID']
# Summarize GHSL in hd urban extents
hd_urb_res = rMisc.zonalStats(hd_urb_ext, inR, rastType='C', unqVals=ex_cols)
hd_urb_res = pd.DataFrame(hd_urb_res, columns=col_names)
hd_urb_res['ID'] = hd_urb_ext['ID']
# Summarize GHSL in admin areas
adm_res = rMisc.zonalStats(inAdm, inR, rastType='C', unqVals=ex_cols)
adm_res = pd.DataFrame(adm_res, columns=col_names)
adm_res['ID'] = inAdm['WB_ADM2_CO']
# -

urb_res.to_csv(os.path.join(out_folder, "urban_ghsl.csv"))
hd_urb_res.to_csv(os.path.join(out_folder, "hd_urban_ghsl.csv"))
adm_res.to_csv(os.path.join(out_folder, "adm_ghsl.csv"))

# +
# Run market access
inR = rasterio.open(friction_file)
# Friction is minutes/metre; *1000 converts to minutes per km grid cell.
# TODO(review): confirm units against the friction-surface documentation.
frictionD = inR.read()[0,:,:] * 1000
mcp = graph.MCP_Geometric(frictionD)

port_data = gpd.read_file(ports)
airport_data = gpd.read_file(airports)
# Destinations for travel-time runs: polygon centroids of the urban extents.
urb_centroids = urb_ext.copy()
urb_centroids['geometry'] = urb_centroids['geometry'].apply(lambda x: x.centroid)
hd_urb_centroids = hd_urb_ext.copy()
hd_urb_centroids['geometry'] = hd_urb_centroids['geometry'].apply(lambda x: x.centroid)

# +
importlib.reload(ma)
popR = rasterio.open(wp_1km)
inR = rasterio.open(friction_file)
frictionD = inR.read()[0,:,:] * 1000
mcp = graph.MCP_Geometric(frictionD)
port_access = ma.summarize_travel_time_populations(popR, inR, port_data, mcp, inAdm, os.path.join(out_folder, 'port_tt.tif'))
airport_access = ma.summarize_travel_time_populations(popR, inR, airport_data, mcp, inAdm, os.path.join(out_folder, 'airport_tt.tif'))
urban_access = ma.summarize_travel_time_populations(popR, inR, urb_centroids, mcp, inAdm, os.path.join(out_folder, 'urban_tt.tif'))
hd_urban_access = ma.summarize_travel_time_populations(popR, inR, hd_urb_centroids, mcp, inAdm, os.path.join(out_folder, 'hd_urban_tt.tif'))
# -

pd.DataFrame(port_access.drop(['geometry'], axis=1)).to_csv(os.path.join(out_folder, "port_access.csv"))
pd.DataFrame(airport_access.drop(['geometry'], axis=1)).to_csv(os.path.join(out_folder, "airport_access.csv"))
pd.DataFrame(urban_access.drop(['geometry'], axis=1)).to_csv(os.path.join(out_folder, "urban_access.csv"))
pd.DataFrame(hd_urban_access.drop(['geometry'], axis=1)).to_csv(os.path.join(out_folder, "hd_urban_access.csv"))
Implementations/FY22/URB_SSAU1_LKA_UrbanizationReview/LKA_UR_DataPrep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# ## Jupyter Notebook for Checking Python Package Requirements

# +
def version_tuple(v):
    """Parse a version value into a tuple of ints for numeric comparison.

    Accepts dotted strings ('1.10', '0.17.1'), values with non-numeric
    suffixes ('1.5.1rc2' -> (1, 5, 12) is avoided by keeping digits per
    dot-separated token), or a `version_info`-style tuple. Plain string
    comparison is wrong ('1.10' < '1.5.1' lexicographically), hence this
    helper.
    """
    if isinstance(v, tuple):
        v = '.'.join(str(part) for part in v)
    parts = []
    for token in str(v).split('.'):
        digits = ''.join(ch for ch in token if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


def get_packages(pkgs):
    """Import each package named in *pkgs* and collect its version.

    Returns a list aligned index-for-index with *pkgs*; packages that fail
    to import, or expose no version attribute, report '0.0'.
    """
    versions = []
    for p in pkgs:  # BUGFIX: previously iterated the global `packages`, ignoring the argument
        try:
            imported = __import__(p)
            try:
                versions.append(imported.__version__)
            except AttributeError:
                try:
                    versions.append(imported.version)
                except AttributeError:
                    try:
                        versions.append(imported.version_info)
                    except AttributeError:
                        versions.append('0.0')
        except ImportError:
            print('[FAIL]: %s is not installed' % p)
            # BUGFIX: append a placeholder so `versions` stays aligned with
            # `pkgs`; previously a failed import shifted every later version
            # onto the wrong package in the zip below.
            versions.append('0.0')
    return versions


packages = ['numpy', 'scipy', 'matplotlib', 'sklearn', 'pandas', 'mlxtend']
suggested_v = ['1.10', '0.17', '1.5.1', '0.17.1', '0.17.1', '0.4.2']

versions = get_packages(packages)

for p, v, s in zip(packages, versions, suggested_v):
    # BUGFIX: compare parsed version tuples instead of raw strings; string
    # comparison mis-ordered multi-digit components and raised TypeError when
    # a package exposed a tuple `version_info`.
    if version_tuple(v) < version_tuple(s):
        print('[FAIL] %s %s, please upgrade to >= %s' % (p, v, s))
    else:
        print('[OK] %s %s' % (p, v))
# -

# %load_ext watermark
# %watermark -d -p numpy,scipy,matplotlib,sklearn,pandas,mlxtend
code/check_environment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PyCharm (recsys-challenge-2021-twitter)
#     language: python
#     name: pycharm-cc02c472
# ---

# Build the final train/validation/test parquet splits for the RecSys 2021
# Twitter challenge from the target-encoded feature dataset.

import dask
import dask.dataframe as dd
from dask.distributed import LocalCluster, Client
import pyarrow.parquet as pq  # NOTE(review): pq appears unused below
import pyarrow as pa
import pandas as pd

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)

# Worker memory thresholds are fractions of the per-worker memory limit
# (spill to disk at 90%, pause at 93%, kill the worker at 96%).
dask.config.set({"temporary-directory": "/home/ubuntu/data/dask_tmp"})
dask.config.set({'distributed.worker.memory.target': 0.85})
dask.config.set({'distributed.worker.memory.spill': 0.90})
dask.config.set({'distributed.worker.memory.pause': 0.93})
dask.config.set({'distributed.worker.memory.terminate': 0.96})

# +
def start_cluster(n_workers, threads_per_worker, memory_limit, processes):
    """Start a local dask cluster and return a Client connected to it.

    memory_limit is per worker (e.g. "24GB"); processes=True uses separate
    worker processes rather than threads.
    """
    cluster = LocalCluster(
        n_workers=n_workers,
        threads_per_worker=threads_per_worker,
        memory_limit=memory_limit,
        processes=processes
    )
    client = Client(cluster)  # use default n_threads and mem
    print(client)
    print(client.cluster)
    return client

c = start_cluster(n_workers=8, threads_per_worker=1, memory_limit="24GB", processes=True)
# -

path = "Preprocessed/Valid/FeatureExtraction/All_feature_dataset/Valid_with_TE"
# Take the schema from one part file so every output split is written with an
# identical parquet schema.
schema = pa.Schema.from_pandas(pd.read_parquet(path + "/part.0.parquet", engine='pyarrow'))
df = dd.read_parquet(path, engine='pyarrow')

df

df.columns.to_list()

# +
# Split rows that came from the official validation set off from the rest,
# then split the official-validation rows 2/3 (extra train) / 1/3 (test).
train_df = df[~df['is_from_official_val']]
val_df = df[df['is_from_official_val']]
val1_df, val2_df = val_df.random_split([2/3, 1/3], random_state=123)

# "time" variant keeps only recent tweets (epoch 1.614011e9 ~ late Feb 2021).
train_df_time = train_df[train_df['tweet_timestamp'] > 1.614011e+09]
train_df_notime = train_df

# +
# Four train variants: with/without the official-val rows (val1) and
# with/without the time filter; val2 is held out as the test set.
final_train_noval_notime= train_df_notime
final_train_noval_time = train_df_time
final_train_val_notime = dd.concat([train_df_notime, val1_df], axis=0)
final_train_val_time = dd.concat([train_df_time, val1_df], axis=0)
test_df = val2_df
# -

final_train_noval_notime = final_train_noval_notime.repartition(partition_size="200MB")
final_train_noval_notime.to_parquet("new/final_train_noval_notime", engine='pyarrow', schema=schema, overwrite=True)

final_train_noval_time = final_train_noval_time.repartition(partition_size="200MB")
final_train_noval_time.to_parquet("new/final_train_noval_time", engine='pyarrow', schema=schema, overwrite=True)

final_train_val_notime = final_train_val_notime.repartition(partition_size="200MB")
final_train_val_notime.to_parquet("new/final_train_val_notime", engine='pyarrow', schema=schema, overwrite=True)

final_train_val_time = final_train_val_time.repartition(partition_size="200MB")
final_train_val_time.to_parquet("new/final_train_val_time", engine='pyarrow', schema=schema, overwrite=True)

test_df = test_df.repartition(partition_size="200MB")
test_df.to_parquet("new/test", engine='pyarrow', schema=schema, overwrite=True)
final_splits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=["remove_cell"]
# <br>
#
# # Introdução
# -

# +
# #!pip3 install kaleido --upgrade
# -

import numpy as np
import pandas as pd
import plotly.graph_objects as go

# + tags=["remove_cell"]
from paths import *

# + [markdown] pycharm={"name": "#%% md\n"}
# <br>
#
# ## Diretriz
#
# <br>
#
# De acordo com a [Diretriz Nacional do Plano de Amostragem da Vigilância da Qualidade da Água para Consumo Humano da Vigilância Sanitária](http://bvsms.saude.gov.br/bvs/publicacoes/diretriz_nacional_plano_amostragem_agua.pdf), o número mínimo de amostras mensais de alguns parâmetros (cloro residual, turbidez, coliformes, *E. coli* e fluoreto), a ser realizada pela vigilância sanitária municipal, varia de acordo com a população.
#

# + [markdown] jp-MarkdownHeadingCollapsed=true tags=["remove_cell"]
# ### Número de Amostras para Cloro Residual, Turbidez, Coliformes, *E. coli*
#
# As amostras de cloro residual são feitas pelo prestador do serviço de abastecimento público (controle), bem como pela vigilância sanitária municipal (vigilância), sendo esse último o principal foco destas análises.
#
# ![Tabela 1](https://i.imgur.com/6PJNeCp.png)
#
# <br>
#
# Ainda, de acordo com as diretrizes, *o número mínimo mensal de análises previsto para o Plano de Amostragem Básico é definido em função das faixas populacionais e constitui um quantitativo único a ser distribuído para o monitoramento da qualidade da água referente às três formas de abastecimento de água (SAA, SAC e SAI)*. Portanto, para analisar se a vigilância sanitária municipal segue as diretrizes estipuladas pelo Ministério da Saúde para amostragem de cloro residual, não será feito, nesse momento, detalhamento sobre o número de amostras realizadas para cada sistema de abastecimento, avaliando-se somente o número total de amostras que devem ser realizadas pela vigilância municipal.
#
# Nos códigos abaixo, a tabela que consta nas diretrizes do Ministério da Saúde foi codificada, possibilitando análises.
# -

def create_table_cloro():
    """Encode Table 1 of the MS guideline (chlorine/turbidity/coliforms/E. coli).

    Returns a DataFrame with the population brackets, the raw descriptive
    'Nº de Amostras' text, plus two derived numeric columns:
    'Nº de Amostras Fixo' (fixed sample count) and 'Nº de Amostras Variável'
    (one extra sample per this many inhabitants; 0 means fixed-only bracket).
    """
    # Build the raw guideline table
    df = pd.DataFrame(
        {
            'População De': [0, 5001, 10001, 50001, 200001, 500001],
            'População Até': [5000, 10000, 50000, 200000, 500000, np.inf],
            'Nº de Amostras': [6, 9, '8 + (1 para cada 7,5 mil habitantes)', '10 + (1 para cada 10 mil habitantes)', '20 + (1 para cada 20 mil habitantes)', '35 + (1 para cada 50 mil habitantes)']
        }
    )
    # Fixed part: text before '+' (or the whole value for numeric-only rows)
    df['Nº de Amostras Fixo'] = df['Nº de Amostras'].str.split('+').str[0]
    df['Nº de Amostras Fixo'] = df['Nº de Amostras Fixo'].fillna(df['Nº de Amostras']).astype(float)
    # Variable part: parse '(1 para cada N mil habitantes)' into inhabitants-per-extra-sample
    df['Nº de Amostras Variável'] = df['Nº de Amostras'].str.split('+').str[-1]
    df['Nº de Amostras Variável'] = df['Nº de Amostras Variável'].fillna(0)
    df['Nº de Amostras Variável'] = df['Nº de Amostras Variável'].replace({'\(1 para cada': '','mil habitantes\)': '',',': '.'}, regex=True).astype(float)
    df['Nº de Amostras Variável'] = (df['Nº de Amostras Variável']*1000).astype(int)
    # Final table
    return df

# <br>
#
# Uma vez codificada, o *campo* descritivo **Nº de Amostras**, que contem o número de análises, foi dividido entre o **Nº de Amostras Fixo** e **Nº de Amostras Variável**, possibilitando a automatização dos cálculos.

# + tags=["remove_cell"]
create_table_cloro()
# -

# <br>
#
# Com a tabela codificada, foi escrita uma função que calcula o número de amostras, seguindo as condições recomendadas pelo Ministério da Saúde.
def numero_amostras_cloro(x):
    """Minimum monthly number of chlorine/turbidity/coliform/E. coli samples
    the municipal health surveillance must take for a population of ``x``
    inhabitants (Table 1 of the MS national sampling-plan guideline).

    Returns a float (``np.trunc`` result): the whole number of samples.
    """
    df_cloro = create_table_cloro()
    # Row-wise over the guideline table: select the population bracket that
    # contains x. Brackets with a variable component add one extra sample per
    # 'Nº de Amostras Variável' inhabitants on top of the fixed count.
    array = np.where(
        (x >= df_cloro['População De']) & (x <= df_cloro['População Até']) & (df_cloro['Nº de Amostras Variável']>0),
        (df_cloro['Nº de Amostras Fixo'] + x/(df_cloro['Nº de Amostras Variável'])),
        np.where(
            (x >= df_cloro['População De']) & (x <= df_cloro['População Até']) & (df_cloro['Nº de Amostras Variável']==0),
            df_cloro['Nº de Amostras Fixo'],
            np.nan
        )
    )
    array = np.trunc(array)            # guideline counts are whole samples (round down)
    array = array[~np.isnan(array)]    # keep only the matching bracket
    return array[0]

# + [markdown] tags=["remove_cell"]
# <br>
#
# ### Número de Amostras para Fluoreto
#
#
# ![Tabel 2](https://i.imgur.com/JXvIYsK.png)
# -

def create_table_fluoreto():
    """Encode Table 2 of the MS guideline: fluoride sampling.

    Each population bracket maps to a fixed minimum monthly number of samples
    (no variable component, unlike the chlorine table).
    """
    # Build the guideline table
    df = pd.DataFrame(
        {
            # BUGFIX: the fourth bracket previously started at 100001 — a
            # duplicate of the third bracket's lower bound — producing the
            # overlapping ranges 100.001–200.000 and 100.001–500.000. Per the
            # MS table the brackets are contiguous, so it starts at 200001.
            # Returned values are unchanged (the shadowed row never won).
            'População De': [0, 50001, 100001, 200001, 500001, 1000001],
            'População Até': [50000, 100000, 200000, 500000, 1000000, np.inf],
            'Nº de Amostras': [5, 7, 9, 13, 18, 27]
        }
    )
    # Final table
    return df

# + tags=["remove_cell"]
create_table_fluoreto()
# -

def numero_amostras_fluoreto(x):
    """Minimum monthly number of fluoride samples for a population of ``x``
    inhabitants (fixed per population bracket).

    Returns a float (``np.trunc`` result): the whole number of samples.
    """
    df = create_table_fluoreto()
    # Select the (single) bracket containing x; all other rows become NaN.
    array = np.where(
        (x >= df['População De']) & (x <= df['População Até']),
        df['Nº de Amostras'],
        np.nan
    )
    array = np.trunc(array)
    array = array[~np.isnan(array)]
    return array[0]

# + [markdown] tags=["remove_cell"]
# <br>
#
# ## População
#
# Abaixo são demonstrados alguns exemplos de cálculo empregando a tabela codificada e função. A definição do número mínimo de amostras pode ser feita utilizando um número isolado, ou em uma série de dados.
# + tags=["remove_cell"]
# Example: single population value
população = 300000
n_amostras_cloro = numero_amostras_cloro(população)
n_amostras_fluoreto = numero_amostras_fluoreto(população)

# Results
print('Nº de Amostras de Cloro recomendadas para {} habitantes é {}'.format(população, n_amostras_cloro))
print('Nº de Amostras de Fluoreto recomendadas para {} habitantes é {}'.format(população, n_amostras_fluoreto))

# + tags=["remove_cell"]
# Example: applying the functions over a Series of populations
df = pd.DataFrame({'População': [1500, 10000, 180000, 350000, 600000, 650000]})
df['Nº Amostras Cloro'] = df['População'].apply(lambda x: numero_amostras_cloro(x))
df['Nº Amostras Fluoreto'] = df['População'].apply(lambda x: numero_amostras_fluoreto(x))

# Results
df.head()

# + [markdown] pycharm={"name": "#%% md\n"}
# <br>
#
# ## Gráfico
#
# Visando compreender como se dá a distribuição do número de amostras em função do aumento da população, foi elaborado um gráfico que demonstra que o número de amostras tem um crescimento logaritmico, ou seja, para municípios com população até 200.000 hab há um aumento continuo do número de amostras, até 500.000 habitantes o aumento do número de amostras não segue a mesma tendência, sendo reduzido. Em municípios com mais de 500.000 o aumento do número de amostras é ainda menor.

# + tags=["remove_cell"]
# Populations sampled every 3000 inhabitants, up to 2 million
df = pd.Series(np.arange(1000, 2000000, 3000)).array
df = pd.DataFrame(df, columns=['n_habitantes'])

# Required sample counts for each population
df['n_habitantes'] = df['n_habitantes'].astype(int)
df['Nº Amostras Cloro'] = df['n_habitantes'].apply(lambda x: numero_amostras_cloro(x))
df['Nº Amostras Fluoreto'] = df['n_habitantes'].apply(lambda x: numero_amostras_fluoreto(x))

# + tags=["remove_cell"]
# Build the figure: one line per parameter family
fig = go.Figure()

# Trace: chlorine/turbidity/coliforms/E. coli
fig.add_trace(
    go.Scatter(
        x=df['n_habitantes'],
        y=df['Nº Amostras Cloro'],
        name='Nº Amostras Cloro Residual, Turbidez, Coliformes, E. coli',
        mode='lines',
        marker={'color': 'blue'},
        opacity=0.8,
        #hovertemplate='s',
        #hovertemplate="$%{y}<br>Date: %{x}"
        hovertemplate="%{y}"
    )
)

# Trace: fluoride
fig.add_trace(
    go.Scatter(
        x=df['n_habitantes'],
        y=df['Nº Amostras Fluoreto'],
        name='Nº Amostras Fluoreto',
        mode='lines',
        marker={'color': 'red'},
        opacity=0.8,
        hovertemplate="%{y}"
    )
)

# Update Layout
fig.update_layout(
    title='Nº de Amostras Mensais por População',
    xaxis={'title': 'Nº de Habitantes'},
    yaxis={'title': 'Nº de Amostras'},
    height=600,
    separators=',.',
    paper_bgcolor='rgba(0,0,0,0)',
    yaxis_tickformat=',.2r',
    xaxis_tickformat=',.2r',
    dragmode=False,
    #hovermode='closest',
    hovermode='x',
    #hoverinfo= "name+x+text",
    #hovermode='x unified',
    #hoverlabel='ssss',
    legend=dict(
        yanchor='top',
        y=1,
        xanchor='left',
        x=0.0
    )
)

# Graph
config = {
    'displaylogo': False,
    #'scrollZoom': True,
    'responsive': False,
}
# output_path_graph comes from `from paths import *` at the top of the notebook
fig.write_html(os.path.join(output_path_graph, 'n_amostras_habitantes.html'), config=config)
fig.write_image(os.path.join(output_path_graph, 'n_amostras_habitantes.png'), width=800, height=500, scale=1)
fig.show(config=config)

# + [markdown] tags=["remove_cell"]
# <br>
#
# # Export
# -

# + tags=["remove_cell"]
import os
from traitlets.config import Config
from nbconvert import PythonExporter
from nbconvert.preprocessors import TagRemovePreprocessor

# + tags=["remove_cell"]
input_filepath = os.path.join(os.getcwd(), '01_diretriz.ipynb')
output_filepath = os.path.abspath(os.path.join(os.getcwd(), '..', 'src', 'normas', 'diretriz.py'))
print(output_filepath)

# + tags=["remove_cell"]
# Export this notebook to a plain .py module, stripping cells tagged
# "remove_cell" so only the reusable functions end up in src/normas/diretriz.py.
# Import the exporter
c = Config()
c.TagRemovePreprocessor.enabled=True
c.ClearOutputPreprocessor.enabled=True
c.TemplateExporter.exclude_markdown=True
c.TemplateExporter.exclude_code_cell=False
c.TemplateExporter.exclude_input_prompt=True
c.TemplateExporter.exclude_output=True
c.TemplateExporter.exclude_raw=True
c.TagRemovePreprocessor.remove_cell_tags = ('remove_cell',)
c.TagRemovePreprocessor.remove_input_tags = ('remove_cell',)
c.TagRemovePreprocessor.remove_all_outputs_tags = ('remove_output',)
c.preprocessors = ['TagRemovePreprocessor']
c.PythonExporter.preprocessors = ['nbconvert.preprocessors.TagRemovePreprocessor']

# Configure and run out exporter
py_exporter = PythonExporter(config=c)
py_exporter.register_preprocessor(TagRemovePreprocessor(config=c), True)

# Configure and run out exporter - returns a tuple - first element with html, second with notebook metadata
body, metadata = PythonExporter(config=c).from_filename(input_filepath)

# Write to output html file
with open(output_filepath, 'w', encoding='utf-8') as f:
    f.write(body)
# -
test/01_diretriz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sandbox: compare test-positivity computed by the metrics API against the
# values exported by the website, state by state.

# +
# %load_ext autoreload
# %autoreload 2

from typing import Optional
import pandas as pd
import numpy as np
import pathlib

import seaborn as sns

from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic import common_df
from libs.datasets import combined_datasets
from libs.datasets import AggregationLevel
from libs import top_level_metrics
from libs import us_state_abbrev

pd.options.display.max_rows = 3000
pd.options.display.max_columns = 3000
sns.set()
# -

us_timeseries = combined_datasets.load_us_timeseries_dataset()

# +
mass_timeseries = us_timeseries.get_subset(aggregation_level=AggregationLevel.STATE, state="CT")
# Get recent data
mass = mass_timeseries.get_data(after="2020-03-15")

# equivalent to
# us_timeseries.get_data(aggregation_level=AggregationLevel.STATE, state="MA", after="2020-08-01")
# or
# mass_data = mass_timeseries.data
# mass_data.loc[mass_data[CommonFields.DATE] > "2020-08-01"]
# or
# mass_data.loc[mass_data['date'] > "2020-08-01"]

# CSV exported from the website (per-state test positivity).
path_to_test_positivity = "~/Downloads/test-positive.csv"
# -

def mean_percent_diff(s1, s2):
    """Mean absolute percent difference between two aligned Series.

    NOTE(review): the denominator is (s1 + s2), not the usual symmetric-form
    (s1 + s2) / 2, so values are half the standard symmetric percent
    difference — confirm this is intended before reusing elsewhere.
    """
    return (((s1 - s2) / (s1 + s2)) * 100).abs().mean()

def compare_state(state, path_to_test_positivity):
    """Join API-computed test positivity with the website CSV for one state.

    Prints the mean percent difference and returns the merged frame with a
    per-row 'diff' column (website minus API).
    """
    state_timeseries = us_timeseries.get_subset(aggregation_level=AggregationLevel.STATE, state=state)
    # Get recent data
    data = state_timeseries.get_data(after="2020-03-15")
    fips = us_state_abbrev.ABBREV_US_FIPS[state]
    # METRICS API: recompute top-level metrics for this state's FIPS code
    metrics = top_level_metrics.calculate_top_level_metrics_for_fips(fips)
    test_positivity = pd.Series(metrics["testPositivity"])
    # WEBSITE: load the exported CSV and keep only this state's rows
    website = pd.read_csv(path_to_test_positivity)
    website_fips = website[website["fips"] == int(fips)]
    # NOTE(review): assignment into a filtered slice — may emit a
    # SettingWithCopyWarning; consider .copy() before mutating.
    website_fips["date"] = website_fips["date"].astype("datetime64") #convert to datetime
    # JOIN_DATA: the unnamed Series becomes column 0 after to_frame()
    positive_df = test_positivity.to_frame()
    positive_df = positive_df.rename(columns={0:"testPositivity"})
    data_with_positive = data.set_index("date").join(positive_df).reset_index()[["date", "fips", "testPositivity", "positive_tests", "negative_tests"]]
    # merge to get date
    website_and_api_calc = data_with_positive.merge(website_fips, on="date", how='left')
    mean_percent_diff_result = mean_percent_diff(website_and_api_calc["testPositivity"], website_and_api_calc["test-positivity"])
    print(f"Website and API have mean % diff: {mean_percent_diff_result}")
    website_and_api_calc['diff'] = website_and_api_calc['test-positivity'] - website_and_api_calc['testPositivity']
    return website_and_api_calc

# Website stops computing data when negative tests dont increase multiple days in a row
compare_state("CA", path_to_test_positivity).head(10)

# some large differences at the beginning of the timeseries
compare_state("MA", path_to_test_positivity).head(10)

compare_state("CO", path_to_test_positivity).head(10)

compare_state("NV", path_to_test_positivity).head(10)
notebooks/top_level_metrics_sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Functions

# ## Definition

# While data type variables let us store... data, functions let us store actual Python code that can be reused :D<br>
# In other words: Functions in computer science are similar to mathematical functions. Take e.g. the function f(x)=x²+1. It is a "variable" named f which stores the "code" x²+1. After f(x) is defined, we can reference to x²+1 as f(x) in other functions.

# ## Simple functions without arguments

# Let us define a very simple function which simply prints "A". We'll call this function "func":

# Syntax of function definition:
# def function_name():  # The two parentheses are necessary
#     code
def func():
    print("A")

# We can now use this function, i.e. we can write func() instead of print("A"):

# +
def func():
    print("A")

func()
func()
func()
# -

# Functions only execute their code if "()" are used after their name, otherwise, they return the function itself:

# This is similar to "@function_name" in MATLAB
func

# In other words, functions are also variables that can be used like them:

# +
# Here, we copy the function to another one
# with the name "func2"
def func():
    print("A")
    print("B")
    print("C")

func2 = func
func2()
# -

# Functions can also *return* a value. Similarly to the *return* of f(x)=x² for x=2 is 4, a function can return a value that is the result of the code which is executed in it:

# Let's define a simple function which returns the value 3.14 :D
def pi():
    print("We are going to return Pi :D")
    return 3.14

pi()

# ## Scoping of variables

# If we define a variable inside a function...

def func():
    x = 1

# ...this variable only exists inside the scope of the function, i.e. only in the code lines of the function itself:

def func():
    # START OF x's EXISTENCE
    x = 1
    x += 1
    # END of x's EXISTENCE
    print(x)

# Similarly if we define a variable outside a function...

x = 1

# ...this variable (usually) does not exist inside the function:

x = 1
# x does not exist from here...
def func():
    x += 1  # NOTE: calling func() raises UnboundLocalError — the assignment makes x local, with no initial value
    print(x)
# ...to here

func()

# ## Global Variables (most computer scientists hate this trick)

# One can still access variables from *outside* a function using the "global" statement followed by the name of the variable:

x = 1
def func():
    global x # Get access to x from outside the function :O
    x += 1
    print(x)

func()

# Attention: Only use global variables when they are absolutely necessary, otherwise they can cause a huge chaos D:

# ## Handling of single arguments

# Just like mathematical functions, Python functions can take arguments, too. E.g. in the function f(x)=2x, "x" is the argument.<br>
# Let us see how to use single-argument functions in Python:

# +
# Syntax:
# def name_of_function(name_of_argument):
#     code_where_the_argument_can_be_used
#     (optional return value)

# +
# Example 1 without return:
# Let us define a function with which
# we can print the doubled given value
def print_doubled_value(value):
    print(value*2)

# Now, we can use this function with
# any data type which supports "*"
print_doubled_value(2)
print_doubled_value("A")

# +
# Example 2 with return:
# Let us define the mathematical function f(x) = x²
# or in Python's language, the function f with the argument
# x which returns x**2
def f(x):
    return x**2

# Now, we can use this function with data of the type int or float
number = 3
f(number)
# -

# ## Handling of multiple arguments

# Again just like mathematical functions, you can also create Python functions which take multiple arguments:

# +
# Syntax:
# def name_of_function(name_of_argument_1, name_of_argument_2, ...):
#     code_where_the_argument_can_be_used
#     (optional return value)

# +
# Let's define a function which returns
# the volume of a cuboid
def volume(height, width, length):
    return height * width * length

volume(2, 3, 4)
# -

# In order to not get confused by many arguments for a function, you can also name the function's arguments :D

# +
def volume(height, width, length):
    return height * width * length

volume(height=2, width=3, length=4)
# -

# You can also mix up naming and not naming arguments, unnamed arguments have to be at the beginning and are determined by the order of arguments in the function's definition, followed by named ones:

# +
def volume(height, width, length):
    return height * width * length

volume(2, 3, length=4)
# -

# ## Handling of multiple return values

# You can also return multiple values with a Python function:

# +
# Syntax:
# def name_of_function(name_of_argument_1, name_of_argument_2, ...):
#     code_where_the_argument_can_be_used
#     return name_of_return_value_1, name_of_return_value_2, ...

# +
def volume_and_is_height_greater_width(height, width, length):
    volume = height * width * length
    is_height_greater_width = height > width
    return volume, is_height_greater_width

volume_and_is_height_greater_width(2, 3, 4)
# -

# As you can see, the normal return type with multiple return values is a tuple. Alternatively, you can also store the single return values by using multiple variables which "catch" the return values:

# +
def volume_and_is_height_greater_width(height, width, length):
    volume = height * width * length
    is_height_greater_width = height > width
    return volume, is_height_greater_width

# Let us catch the single return values :D
volume, is_height_greater_width = volume_and_is_height_greater_width(2, 3, 4)
print("Volume:")
print(volume)
print("is_height_greater_width:")
print(is_height_greater_width)
# -

# ## Variable number of arguments

# Using "\*" *before* the name of the last argument of a function, you indicate that you want to use the function with a *variable* number of variables. The variables are then given as tuple inside the function:

# +
def sum_variables(*variables):
    sum_ = 0
    for variable in variables:
        sum_ += variable
    return sum_

print(sum_variables(1))           # 1
print(sum_variables(1,2,3))       # 1 + 2 + 3
print(sum_variables(1,2,3,4,5,6)) # 1 + 2 + 3 + 4 + 5 + 6
# -

# ## Using functions as argument values ("function pointers")

# Since functions can be used like data variables, you can also send a function as an argument to another function :D

# +
# We define a function which takes an argument
# that will be executed as function :D
def do_what_the_argument_does(argument):
    # Syntax: Just put "()" after the argument :-)
    argument()

def print_a():
    print("A")

def print_b():
    print("B")

do_what_the_argument_does(print_a)
do_what_the_argument_does(print_b)
# -

# ## Lambda functions / Anonymous functions

# Using Python's <b>lambda</b> keyword, you can define a function in one line and without assigning a name to it:

# +
# Syntax:
# lambda argument_1, argument_2, ...: code
# -

lambda x: 3*x

# You can name a lambda function by assigning it to a variable:

triple = lambda x: 3*x

# Now, it can be used like a normal function :D
triple(2)

# Lambda functions are very important for higher-order functions which we will see in the next chapter...
8_Functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # End-To-End Example: Today's Weather
#
# This example will input a location and then output today's current weather conditions at that location.
#
# To complete this example we will use the following API's
#
# - Google Geocode api to get GPS coordinates for a location eg. Syracuse, NY
# - Darksky.net forecast api to get the current weather conditions for those GPS coordinates
#

import requests


# +
def google_geocode_location(location):
    """Return the {'lat': ..., 'lng': ...} dict for a free-form location string.

    Calls the Google Geocoding API and takes the first (best) match.
    Raises IndexError/KeyError when the API returns no results.
    """
    url = "https://maps.googleapis.com/maps/api/geocode/json"
    params = {'address': location}
    response = requests.get(url, params)
    location_data = response.json()
    coords = location_data['results'][0]['geometry']['location']
    return coords


# main program
location = 'Syracuse,NY'
coords = google_geocode_location(location)
coords

# +
# Exploratory cell left over from developing the function above: issue the
# raw geocode request so the response object itself can be inspected.
url = "https://maps.googleapis.com/maps/api/geocode/json"
params = {'address': location}
response = requests.get(url, params)
#location_data = response.json()
##print(coords)
type(response)

#main program
#location = 'Syracuse,NY'
#coords = google_geocode_location(location)
#coords


# +
def get_current_weather_from_darksky(coords):
    """Return {'temp': ..., 'summary': ...} for a {'lat','lng'} coords dict.

    NOTE(review): the Dark Sky API key is hard-coded into the URL below;
    for anything beyond a classroom demo it should come from configuration.
    """
    # %f,%f substitutes the latitude and longitude into the forecast URL.
    url = "https://api.darksky.net/forecast/67fb6248744159aa45f51831736aa1fc/%f,%f" % (coords['lat'], coords['lng'])
    #print(url)
    response = requests.get(url)
    weather_data = response.json()
    summary = weather_data['currently']['summary']    # summary is the key
    temp = weather_data['currently']['temperature']   # temperature is the key
    conditions = {'temp': temp, 'summary': summary}
    return conditions


# input coordinates, output conditions
location = input("Enter a location and I will give you the current weather: ")
coords = google_geocode_location(location)
conditions = get_current_weather_from_darksky(coords)
# Fixed typo in the output message: "degress" -> "degrees".
print("Current weather is %.1f degrees F and %s" % (conditions['temp'], conditions['summary']))
# -

# BUGFIX: `weather_data` was local to get_current_weather_from_darksky(), so
# this cell raised NameError on a clean run; kept as a comment for reference.
# weather_data['currently']

location_data = response.json()
coords = location_data['results'][0]['geometry']['location']
print(coords)

# Todo list
# input a location eg. Syracuse, ny
# use the google geocode api to get a lat/lng
# use the darksky api and lat/lng to get current weather conditions
# output current weather conditions (temperature and summary (rain, snow, etc...))

# +
import requests

#first I write these
def google_geocode(location):
    """Return the full geocoding JSON payload for a location string."""
    params = {'address': location}
    # BUGFIX: use https — the Google endpoint does not serve plain http.
    url = 'https://maps.googleapis.com/maps/api/geocode/json'
    response = requests.get(url, params=params)
    geodata = response.json()
    return geodata


def darksky_weather(coords):
    """Return the full Dark Sky forecast JSON for a {'lat','lng'} dict."""
    key = '<KEY>'  # sign up for your own key at https://darksky.net/dev
    url = 'https://api.darksky.net/forecast/%s/%f,%f' % (key, coords['lat'], coords['lng'])
    response = requests.get(url)
    weather = response.json()
    return weather


# then test them here
geodata = google_geocode('Syracuse, NY')
coords = geodata['results'][0]['geometry']['location']
weather = darksky_weather(coords)
weather
# -

import requests

# Now I can write the entire program...
location = input("Where are you? (eg. Syracuse, NY) : ")
geodata = google_geocode(location)
coords = geodata['results'][0]['geometry']['location']
weather = darksky_weather(coords)
current = weather['currently']
print("Current conditions in %s are %s with a temperature of %.0f degrees." % (location, current['summary'], current['temperature']))
content/lessons/11/End-To-End-Example/ETEE-Todays-Weather.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Declare Dependencies
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup as bs

# Choose the executable path to driver
# NOTE(review): this path is machine-specific; point it at your own chromedriver.
executable_path = {'executable_path': '/Users/keana/Downloads/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)

# ## Mars News

# Visit Nasa news url through splinter module
url = 'https://mars.nasa.gov/news/'
browser.visit(url)

# +
# HTML Object
html = browser.html

# Parse HTML with Beautiful Soup
soup = bs(html, 'html.parser')

# +
# Retrieve the latest element that contains news title and news_paragraph
articles = soup.find_all("div", class_='list_text')
news_date = articles[0].find('div', class_='list_date').text
news_title = articles[0].find("div", class_="content_title").text
news_p = articles[0].find('div', class_='article_teaser_body').text

# Display scrapped data
print(news_date)
print(news_title)
print(news_p)
# -

# ## Mars Images

# Visit Mars Space Images through splinter module
image_url_featured = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(image_url_featured)

# +
# HTML Object
html_image = browser.html

# Parse HTML with Beautiful Soup
soup = bs(html_image, 'html.parser')

# +
# Retrieve background-image url from style tag; the trailing [1:-1] strips
# the quote characters that remain after removing the url(...) wrapper.
featured_image_url = soup.find('article')['style'].replace('background-image: url(', '').replace(');', '')[1:-1]

# Website Url
main_url = 'https://www.jpl.nasa.gov'

# Concatenate website url with scrapped route
featured_image_url = main_url + featured_image_url

# Display full link to featured image
featured_image_url
# -

# ## Mars Facts

# +
# Visit Mars facts url
facts_url = 'http://space-facts.com/mars/'

# Use Panda's `read_html` to parse the url
mars_facts = pd.read_html(facts_url)

# Find the mars facts DataFrame in the list of DataFrames as assign it to `mars_df`
mars_df = mars_facts[0]

# Assign the columns
mars_df.columns = ['Description', 'Value']

# Set the index to the `Description` column without row indexing
mars_df.set_index('Description', inplace=True)

# Save html code
mars_df.to_html('mars_facts.html')

# Display mars_df
mars_df
# -

# ## Mars Hemispheres
#

# Visit hemispheres website through splinter module
hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemispheres_url)

# +
# HTML Object
html_hemispheres = browser.html

# Parse HTML with Beautiful Soup
soup = bs(html_hemispheres, 'html.parser')

# +
# Retreive all items that contain mars hemispheres information
items = soup.find_all('div', class_='item')

# Create empty list for hemisphere urls
hemisphere_image_urls = []

# Store the main_ul
hemispheres_main_url = 'https://astrogeology.usgs.gov'

# Loop through the items previously stored
for item in items:
    # Store title
    title = item.find('h3').text

    # Store link that leads to full image website
    partial_img_url = item.find('a', class_='itemLink product-item')['href']

    # Visit the link that contains the full image website
    browser.visit(hemispheres_main_url + partial_img_url)

    # HTML Object of individual hemisphere information website
    partial_img_html = browser.html

    # Parse HTML with Beautiful Soup for every individual hemisphere information website
    soup = bs(partial_img_html, 'html.parser')

    # Retrieve full image source
    img_url = hemispheres_main_url + soup.find('img', class_='wide-image')['src']

    # Append the retreived information into a list of dictionaries
    hemisphere_image_urls.append({"title": title, "img_url": img_url})

# Display hemisphere_image_urls
hemisphere_image_urls

# BUGFIX: shut down the browser so the chromedriver process is not leaked;
# the original notebook never closed it.
browser.quit()
Mission_to_Mars/mission_to_mars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="udDs_biH0n5U" colab_type="text" # #### Copyright 2020 Google LLC. # + id="WPY-OyyM0pSs" colab_type="code" colab={} # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="psnUF-8c02o_" colab_type="text" # # Reformer: Image Generation [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/trax/blob/master/trax/models/reformer/image_generation.ipynb) # + [markdown] id="1lnRd_IoERdk" colab_type="text" # This notebook was designed to run on TPU. # # To use TPUs in Colab, click "Runtime" on the main menu bar and select Change runtime type. Set "TPU" as the hardware accelerator. # + id="8PluCmWbZIpJ" colab_type="code" colab={} # Install JAX. This custom build raises the TPU timeout threshold, because the # default limit of 2 minutes is too short for sampling very long sequences. # !gsutil cp gs://trax-ml/reformer/jaxlib-0.1.39-cp36-none-manylinux2010_x86_64.whl . # !gsutil cp gs://trax-ml/reformer/jax-0.1.59-cp36-none-manylinux2010_x86_64.whl . # !pip install --upgrade -q ./jaxlib-0.1.39-cp36-none-manylinux2010_x86_64.whl # !pip install --upgrade -q ./jax-0.1.59-cp36-none-manylinux2010_x86_64.whl # Make sure the Colab Runtime is set to Accelerator: TPU. 
import requests import os if 'TPU_DRIVER_MODE' not in globals(): url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206' resp = requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. from jax.config import config config.FLAGS.jax_xla_backend = "tpu_driver" config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR'] print(config.FLAGS.jax_backend_target) # + id="yiPdBenoZwH6" colab_type="code" colab={} # !pip install --upgrade -q gin git+https://github.com/google/trax.git@v1.2.3 from tensorflow.compat.v1.io.gfile import GFile import gin import os import jax import trax from trax.models.beam_search import Search from trax.supervised import inputs import numpy as onp import jax.numpy as np from scipy.special import softmax # + id="yyxRk75iaAap" colab_type="code" colab={} # %matplotlib inline from matplotlib import pyplot as plt # + [markdown] colab_type="text" id="FQ89jHCYfhpg" # ## Load example data and model # + id="qBvuw2h85WXE" colab_type="code" colab={} # Normally we train on the full imagenet64 training set, which is quite large so # we won't be loading it from this notebook. Instead, let's just load a few PNG # images to use in our data pipeline. DATA = [] for i in range(8): img = plt.imread(GFile('gs://trax-ml/reformer/img{}.png'.format(i), 'rb')) # Convert from RGBA floating-point to RGB integer representation. img = onp.asarray(img[:, :, :3] * 255, dtype=onp.int32) DATA.append(img) # + id="oBZh0Q2UEiaB" colab_type="code" outputId="d5adcac0-6f76-4c56-e6ef-74becaca87be" colab={"base_uri": "https://localhost:8080/", "height": 130} # We can examine one of the images to make sure we've loaded it correctly. plt.figure(figsize=(1.5, 1.5)) plt.axis('off') plt.imshow(DATA[0]) # + id="VXjtCPxl3I82" colab_type="code" colab={} # We'll be using a pre-trained 12-layer Reformer model. # First, load the config (which sets all needed hyperparameters). 
# !gsutil cp gs://trax-ml/reformer/imgnet64/config.gin ./config.gin gin.parse_config_file('./config.gin') # + id="NhiTshPPbvLY" colab_type="code" colab={} # Now we construct a ReformerLM instance and load the pre-trained weights. # The 'predict' mode configures the model to accept single tokens at a time, # instead of feeding in a complete image all at once. model_infer = trax.models.ReformerLM(mode='predict') model_infer.init_from_file( 'gs://trax-ml/reformer/imgnet64/model.pkl', weights_only=True) # + [markdown] id="zY3hpgnI5Rgn" colab_type="text" # ## Sample from the model # + [markdown] id="PnzRPCzFqIVi" colab_type="text" # Now we're ready to sample from the pre-trained Reformer model. Unlike during training, sampling processes the images one pixel and channel value at a time. The TPU colab runtime has 8 cores so we can sample 8 images in parallel. # + id="W9ZetV91PujO" colab_type="code" colab={} sampling_decoder = Search( trax.models.ReformerLM, model_infer.weights, temperature=1.0, max_decode_len=32*64*3, ) # + [markdown] id="HOLawc5dB7QV" colab_type="text" # Sampling is an inherently serial process and will take up to 9 minutes to run. A good chunk of that time will be spent on JIT-compiling the code, though, so the code cell below will finish faster when re-run for a second time. 
# + id="We9Jj9Rap3cB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 214} outputId="10b6142b-11f1-414d-9b63-353f721a6a82" flat_prompt = [] for i, img in enumerate(DATA[:trax.math.device_count()]): img = img.reshape((-1, 64, 3))[:32, :, :] flat_prompt.append(img.reshape((-1,))) prompt = onp.stack(flat_prompt, 0) print("Prompt:") plt.figure(figsize=(10, 10*8)) for i in range(prompt.shape[0]): plt.subplot(1, 8, i+1) plt.axis('off') plt.imshow(prompt[i].reshape((-1, 64, 3)), aspect='equal') plt.show() seqs, scores = sampling_decoder.decode(targets_prefix=prompt, batch_size=8) print("Sampled completions:") plt.figure(figsize=(10, 10*8)) for i in range(prompt.shape[0]): plt.subplot(1, 8, i+1) plt.axis('off') plt.imshow(seqs[i, -1].reshape((-1, 64, 3)), aspect='equal') plt.figure(figsize=(10, 10*8)) for i in range(prompt.shape[0]): plt.subplot(1, 8, i+1) plt.axis('off') img = np.concatenate([prompt[i], seqs[i, -1]], -1) plt.imshow(img.reshape((-1, 64, 3)), aspect='equal') # + id="olF4PpORpCTK" colab_type="code" colab={}
trax/models/reformer/image_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Registration of $n$-cameras from their pairwise poses # # ### Goal # # Suppose $n$ cameras observe a number of 3D points whose geometry are not known. Using 2D-2D correspondences, we assume the relative pose of each pair in the $n$ cameras is given as $R_{ij}, t_{ij}$ satisfying $X_j = R_{ij} X_i + t_{ij}$ where $|t_{ij}| = 1$. # * Notice that the scale of $t$ cannot be determined from 2D-2D correspondences, and hence $|t_{ij}|=1$. # * The $n$ cameras do not necessarily observe all the points. On estimating the relative pose, the camera pair can use only the points visible (identified) from both. # * Some pairs of the cameras may not observe a sufficient number of points for their relative pose estimation (e.g. they do not share the field-of-view). This is OK, as long as each camera is *connected* to 2 cameras. That is, given a camera graph $G$ whose edges denote camera pairs having their relative poses estimated, the minimum degree of the graph $\delta(G)$ should satisfy $\delta(G) \ge 2$. # # Given $R_{ij}, t_{ij}$ pairs, this notebook estimates their poses $R_{i}, t_{i}$ in a unified coordinate system. In particular, we use the first camera coordinate system as the world coordinate system, i.e., $R_1 = I_{3{\times}3}, t_1 = (0, 0, 0)^\top$. # # * Input: # * $R_{ij}, t_{ij}$ # * Output: # * $R_{i}, t_{i}$ # # ### References # * <NAME> and <NAME>. "Robust Rotation and Translation Estimation in Multiview Reconstruction," CVPR 2007. # * <NAME>, <NAME>, and <NAME>. "A global linear method for camera pose registration," ICCV 2013. 
# # ## Libraries # + # %matplotlib notebook import sys, os, cv2 import numpy as np from glob import glob import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) from pycalib.plot import plotCamera from pycalib.calib import lookat, triangulate, excalib2, pose_registration, rebase # - # ## Synthetic data # + # 3D points # X_gt = (np.random.rand(16, 3) - 0.5)*5 # random points centered at [0, 0, 0] X_gt = np.array(np.meshgrid(np.linspace(-1, 1, 3), np.linspace(-1, 1, 3), np.linspace(-1, 1, 3))).reshape((3, -1)).T # 3D grid points Np = X_gt.shape[0] print('X_gt:', X_gt.shape) # Camera intrinsics K = np.array([[600, 0, 320], [0, 600, 240], [0, 0, 1]]).astype(np.float) # VGA camera # Camera poses: cameras are at the vertices of a hexagon t = 2 * np.pi / 5 * np.arange(5) v_gt = np.vstack((10*np.cos(t), 10*np.sin(t), np.zeros(t.shape))).T Nc = v_gt.shape[0] R_gt = [] t_gt = [] P_gt = [] rvec_gt = [] for i in range(Nc): t = v_gt[i,:] R, t = lookat(t, np.zeros(3), np.array([0, 1, 0])) R_gt.append(R) t_gt.append(t) P_gt.append(K @ np.hstack((R, t))) rvec_gt.append(cv2.Rodrigues(R)[0]) R_gt = np.array(R_gt) t_gt = np.array(t_gt) P_gt = np.array(P_gt) rvec_gt = np.array(rvec_gt) print('R_gt:', R_gt.shape) print('t_gt:', t_gt.shape) print('P_gt:', P_gt.shape) print('rvec_gt:', rvec_gt.shape) # 2D observations points x = [] for i in range(Nc): xt = cv2.projectPoints(X_gt.reshape((-1, 1, 3)), rvec_gt[i], t_gt[i], K, None)[0].reshape((-1, 2)) x.append(xt) x = np.array(x) print('x:', x.shape) # Verify triangulation Y = [] for i in range(Np): y = triangulate(x[:,i,:].reshape((-1,2)), P_gt) #print(y) Y.append(y) Y = np.array(Y).T Y = Y[:3,:] / Y[3,:] assert np.allclose(0, X_gt - Y.T) # Verify z > 0 at each camera for i in range(Nc): Xc = R_gt[i] @ X_gt.T + t_gt[i] assert np.all(Xc[2, :] > 0) # - # ## Relative pose estimation # # Now we have a set of 
corresponding points for each pair of the cameras. Given such correspondences, the cell below does 2-view camera calibration for each pair. # Rt_pairs = dict() for i in range(Nc-1): for j in range(i+1, Nc): R, t, _, _, _ = excalib2(x[i], x[j], K, np.zeros(5), K, np.zeros(5)) Rt_pairs[i, j] = np.hstack((R, t)) # ## Pose registration # + # Registration R, t, err_r, err_t = pose_registration(Nc, Rt_pairs) # Transform to make Camera0 be WCS R_est = [] t_est = [] for c in reversed(range(Nc)): Rx, tx = rebase(R[:3, :3], t[:3], R[3*c:3*c+3, :3], t[3*c:3*c+3]) R_est.append(Rx) t_est.append(tx) R_est = np.array(R_est[::-1]) t_est = np.array(t_est[::-1]) # This estimation is up-to-scale. So normalize by the cam1-cam2 distance. for c in reversed(range(Nc)): t_est[c] /= np.linalg.norm(t_est[1]) # - # ## Triangulate 3D points # + # Projection matrix P_est = [] for i in range(Nc): P_est.append(K @ np.hstack((R_est[i], t_est[i]))) P_est = np.array(P_est) # Triangulate 3D points Y_est = [] for i in range(Np): y = triangulate(x[:2,i,:].reshape((-1,2)), P_est[:2]) Y_est.append(y) Y_est = np.array(Y_est).T Y_est = Y_est[:3,:] / Y_est[3,:] # - # ## Plot fig = plt.figure() ax = Axes3D(fig) #ax.set_aspect('equal') ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) ax.set_zlim(0, 1) ax.plot(Y_est[0,:], Y_est[1,:], Y_est[2,:], "o") cmap = plt.get_cmap("tab10") for i in range(Nc): plotCamera(ax, R_est[i].T, - R_est[i].T @ t_est[i], cmap(i), 0.05) fig.show() # ## Exercise # # 1. Add Gaussian noise (e.g. $\mu=0, \sigma=1 \mathrm{px}$) to the 2D observations `x`, and see how the results are degraded. # 2. Add outliers to `x`. # 3. Introduce distortion correction. # * Assume distorted observations and distortion coefficients are given. Rectify the observations as a preprocessing. # 4. Remove some *connections* between cameras. In the example above, all the cameras are connected each other, i.e., each camera knows the relative pose to all the others. See what happens if the cameras have less connections. 
# 5. This approach can fail if three camera positions are colinear. Detect such cases and use another method (such as PnP) to solve it.
ipynb/ncam_registration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Run spike sorting algorithms
# ============================
#
# This example shows the basic usage of the :code:`sorters` module of :code:`spikeinterface`
#
#

import spikeinterface.extractors as se
import spikeinterface.sorters as ss

# First, let's create a toy example:
#
#

recording, sorting_true = se.example_datasets.toy_example(duration=10, seed=0)

# Check available sorters
# --------------------------
#
#

print(ss.available_sorters())

# This will list the sorters installed in the machine. Each spike sorter
# is implemented in a class. To access the class names you can run:
#

print(ss.installed_sorter_list)

# Change sorter parameters
# -----------------------------------
#
#

default_ms4_params = ss.Mountainsort4Sorter.default_params()
print(default_ms4_params)

# Parameters can be changed either by passing a full dictionary, or by
# passing single arguments.
#
#

# +
# Mountainsort4 spike sorting
default_ms4_params['detect_threshold'] = 4
default_ms4_params['curation'] = False

# parameters set by params dictionary
sorting_MS4 = ss.run_mountainsort4(recording=recording, **default_ms4_params,
                                   output_folder='tmp_MS4')
# -

# parameters set by single keyword arguments
# BUGFIX: write to a distinct folder — the original reused 'tmp_MS4' and so
# collided with the output of the previous run.
sorting_MS4_10 = ss.run_mountainsort4(recording=recording, detect_threshold=10,
                                      output_folder='tmp_MS4_10')

print('Units found with threshold = 4:', sorting_MS4.get_unit_ids())
print('Units found with threshold = 10:', sorting_MS4_10.get_unit_ids())

# Run other sorters
# ------------------
#
# Each call below needs the corresponding sorter installed (and, where
# noted, environment variables pointing at it), so they stay commented out.

# +
# SpyKING Circus spike sorting
# sorting_SC = ss.run_spykingcircus(recording, output_folder='tmp_SC')
# print('Units found with Spyking Circus:', sorting_SC.get_unit_ids())

# +
# KiloSort spike sorting (KILOSORT_PATH and NPY_MATLAB_PATH can be set as environment variables)
# sorting_KS = ss.run_kilosort(recording, output_folder='tmp_KS')
# print('Units found with Kilosort:', sorting_KS.get_unit_ids())

# +
# Kilosort2 spike sorting (KILOSORT2_PATH and NPY_MATLAB_PATH can be set as environment variables)
# sorting_KS2 = ss.run_kilosort2(recording, output_folder='tmp_KS2')
# print('Units found with Kilosort2', sorting_KS2.get_unit_ids())

# +
# Klusta spike sorting
# sorting_KL = ss.run_klusta(recording, output_folder='tmp_KL')
# print('Units found with Klusta:', sorting_KL.get_unit_ids())

# +
# IronClust spike sorting (IRONCLUST_PATH can be set as environment variables)
# sorting_IC = ss.run_ironclust(recording, output_folder='tmp_IC')
# print('Units found with Ironclust:', sorting_IC.get_unit_ids())

# +
# Tridesclous spike sorting
# sorting_TDC = ss.run_tridesclous(recording, output_folder='tmp_TDC')
# print('Units found with Tridesclous:', sorting_TDC.get_unit_ids())
Notebooks/spikeinterface_examples/plot_1_sorters_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import geopandas as gpd
from shapely.geometry import mapping
from pystac_client import Client
from pystac import ItemCollection


def make_geom(aoi_filename):
    '''creates a geom/Polygon/Coordinates - bbox - from an geojson file'''
    # read in AOI as a GeoDataFrame
    aoi = gpd.read_file(aoi_filename)
    # get the geometry of the AOI as a dictionary for use with PySTAC Client
    geom = mapping(aoi.to_dict()['geometry'][0])
    return geom


help(make_geom)

aoi_geojson_file = 'siouxFalls.geojson'

geom = make_geom(aoi_geojson_file)

geom

type(geom)

geom['coordinates']


def get_stac_records_sentinel_search(geom):
    """Search the Element 84 Earth Search STAC API for low-cloud Sentinel-2
    scenes intersecting `geom`; returns the (lazy) search object."""
    # STAC API - Landsat Collection 2
    url = "https://earth-search.aws.element84.com/v0"

    # Search parameters
    params = {
        "collections": ["sentinel-s2-l2a-cogs"],
        "intersects": geom,
        "datetime": "2020-05-01/2021-12-31",
        "limit": 100,
        "query": ["eo:cloud_cover<5"]
    }

    cat = Client.open(url)
    search = cat.search(**params)
    # BUGFIX: reuse the stored count instead of calling search.matched()
    # twice — each call is a network round-trip.
    matched = search.matched()
    print(f"{matched} scenes found")
    return(search)


thing = get_stac_records_sentinel_search(geom)

type(thing)

dir(thing)

search_dict = thing.get_all_items_as_dict()['features']

search_items = thing.get_all_items()

# +
#(items_dict, item_collection) = get_stac_records_sentinel(geom)
# -

search_items[0]

len(search_dict)

search_dict[0]['assets'].keys()

# +
import yaml
from odc import stac
from pyproj import CRS
from pystac.extensions.projection import ProjectionExtension


def open_odc(items, crs=None, resolution=None):
    """Load STAC items into a lazily-chunked xarray datacube via odc-stac.

    BUGFIX: the original silently overwrote both the `crs` and `resolution`
    arguments; they are now honoured, with the old values kept as defaults
    (the CRS of the first item, and 10 m pixels).
    """
    # NOTE(review): YAML layout reconstructed from the collapsed source — the
    # intent is a per-measurement ('*') band configuration; confirm against
    # the odc-stac configuration schema.
    configuration_str = """---
landsat-c2l2-sr:
  measurements:
    '*':
      dtype: float32
      nodata: 0
      units: 'm'
"""
    configuration = yaml.load(configuration_str, Loader=yaml.CSafeLoader)

    datasets = list(stac.stac2ds(items, configuration))

    if crs is None:
        crs_str = str(items[0].properties['proj:epsg'])
        crs = f'EPSG:{crs_str}'
    if resolution is None:
        resolution = (-10, 10)

    data = stac.dc_load(datasets,
                        bands=['B04', 'B03', 'B02', 'B09'],
                        chunks={"x": 1024, "y": 1024},
                        output_crs=crs,
                        resolution=resolution)
    return data
# -

# +
# #!conda install -y odc-stac
# -

_datacube = open_odc(search_items)

_datacube

import rioxarray

datacube = _datacube.rio.clip([geom], crs='epsg:4326')

datacube


def nc_from_ds(DS, filename):
    """Write dataset `DS` to NetCDF, clearing attrs that break serialisation."""
    DS.time.attrs = {}  # this allowed the nc to be written
    #DS.SCL.attrs = {}
    ds1 = DS.drop(labels='spatial_ref')
    ds1.to_netcdf(filename)


# %%time
nc_from_ds(datacube, 'siouxFalls.nc')

# +
# #! ls ~/.aws/
# -

# +
# #! echo '*.nc' > .gitignore
# -
3_Dec_2021/01_pystac_client_dissect_sentinel-deep-detail.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

ds = pd.read_excel('proposed_gs_otherapproaches_1750news.xlsx', sheet_name=1)

ds.columns

subset_columns = ['Cluster', 'Zero-Order', 'M-Order', 'T-Order',
                  'Proposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)',
                  'TextRankWindow24', 'PositionRank', 'Yake', 'JensenShannon',
                  'Multipartite Rank']
# BUGFIX: take an explicit copy — the original assigned new columns to a
# slice of `ds`, which triggers pandas' SettingWithCopyWarning and makes the
# writes' effect on `ds` ambiguous.
subset = ds[subset_columns].copy()

subset

# open a file using with statement; header ("Cluster...") and separator
# ("=...") lines are only echoed, every other non-empty line is a document
top_docs = []
with open("top10docs_tweets.txt", 'r', encoding='utf-8') as fh:
    for curline in fh:
        if curline.startswith("Cluster"):
            # do nothing
            print(curline)
        elif curline.startswith("="):
            # do nothing
            print(curline)
        else:
            if len(curline) > 1:
                top_docs.append(curline.rstrip("\n"))

len(top_docs)

top_docs

# +
# Group the flat document list into 9 clusters of 5 consecutive documents,
# each cluster joined into one '$'-terminated string. This replaces the
# original 9-branch if/elif chain; as before, any documents past index 44
# are silently ignored.
top_docs_in_clusters = [''] * 9

for i in range(len(top_docs)):
    cluster = i // 5
    if cluster < len(top_docs_in_clusters):
        top_docs_in_clusters[cluster] += top_docs[i] + "$"
# -

top_docs_in_clusters[0]

top_10_words_ds = pd.read_csv("cluster_top10_tweets.csv", sep='\t', encoding='utf-8')

top_10_words = top_10_words_ds['Top 10 Terms']

subset['Top10Terms'] = top_10_words
subset['Top5Docs'] = top_docs_in_clusters

# Rename the long proposed-method column to a short display name.
subset['Proposed Method'] = subset['Proposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)']
del subset['Proposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)']

subset

subset.to_csv('all_labels_comparison_with_details1223_tweets.csv', sep='\t', index=False, encoding='utf-8')
All Methods Labels With Detail for Flask app-Tweets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import nltk

from nltk.stem import WordNetLemmatizer
wordNet = WordNetLemmatizer()

from nltk.stem.snowball import SnowballStemmer, PorterStemmer
ps = PorterStemmer()


def remove_stopwords(st, sw):
    """Stem every word of `st` that is not in the stop-word set `sw`.

    NOTE(review): despite the name, this also applies Porter stemming.
    """
    output = [ps.stem(w) for w in st if w not in sw]
    return output


messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t', names=["label", "message"])
messages
# sep='\t' as columns are tab seperated as y and x

# data preprocessing and cleaning
from nltk.corpus import stopwords
sw = set(stopwords.words('english'))

from nltk.tokenize import RegexpTokenizer
# BUGFIX: renamed from `re`, which shadowed Python's built-in `re` module.
word_tokenizer = RegexpTokenizer('[a-zA-Z]+')


def customTokenizer(corpus):
    """Lower-case a document, keep alphabetic tokens, drop stop words, stem."""
    words = word_tokenizer.tokenize(corpus.lower())
    words = remove_stopwords(words, sw)
    return words


x = messages['message']
y = pd.get_dummies(messages['label'])  # to make spam as 1 and ham as 0
# can also use label encoding
y = y.iloc[:, 1].values  # spam
y

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

# bag of words
from sklearn.feature_extraction.text import CountVectorizer
corpus = messages['message']
# instead of using the custom tokenizer we could also loop over each document
# in the corpus and append stop-word-filtered / lemmatized documents one by
# one to form a new corpus
cv = CountVectorizer(tokenizer=customTokenizer)
x_train = cv.fit_transform(x_train).toarray()
x_test = cv.transform(x_test).toarray()
print(x_train.shape)  # (4457, 5659): 5659 words in vocab
print(len(cv.vocabulary_))
# cv.vocabulary_
# so instead of 5659 words lets take top 2500 most frequent words with max_features

# Training model using Naive Bayes
from sklearn.naive_bayes import MultinomialNB
spam_model = MultinomialNB().fit(x_train, y_train)
y_pred = spam_model.predict(x_test)

from sklearn.metrics import confusion_matrix, accuracy_score
confusion_m = confusion_matrix(y_test, y_pred)
accuracy = accuracy_score(y_test, y_pred)
accuracy

# lets test out max features effect on accuracy
x = messages['message']
y = pd.get_dummies(messages['label'])
y = y.iloc[:, 1].values

ans = []
maxx = 0
z = None  # BUGFIX: initialise so print(maxx, z) cannot raise NameError
for i in range(2000, 5659, 100):
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
    cv = CountVectorizer(max_features=i, tokenizer=customTokenizer)
    x_train = cv.fit_transform(x_train).toarray()
    x_test = cv.transform(x_test).toarray()
    spam_model = MultinomialNB().fit(x_train, y_train)
    y_pred = spam_model.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    ans.append(accuracy)
    if accuracy > maxx:
        maxx = accuracy
        z = i
print(maxx, z)

r = np.arange(2000, 5659, 100)

import matplotlib.pyplot as plt
plt.plot(r, ans)
SpamClassifier.ipynb
# Jupytext-converted notebook: generates a sinc-like surface with NumPy and
# round-trips it through an Open3D point cloud.
# + nbsphinx="hidden"
import open3d as o3d
import numpy as np
import os
import sys

# monkey patches visualization and provides helpers to load geometries
sys.path.append('..')
import open3d_tutorial as o3dtut
# change to True if you want to interact with the visualization windows
# (idiom fix: `not "CI" in os.environ` -> `"CI" not in os.environ`)
o3dtut.interactive = "CI" not in os.environ
# -

# # Working with NumPy
# Open3D data structures are natively compatible with NumPy buffers. The code
# below generates a variant of the sinc function using NumPy and visualizes
# it with Open3D.
#
# First, build an n-by-3 matrix `xyz` whose columns hold x, y, and
# z = sin(x^2 + y^2) / (x^2 + y^2); `z_norm` rescales z to the [0, 1] range.

# generate a neat n-by-3 matrix using a variant of the sinc function
x = np.linspace(-3, 3, 401)
mesh_x, mesh_y = np.meshgrid(x, x)
z = np.sinc(np.power(mesh_x, 2) + np.power(mesh_y, 2))
z_norm = (z - z.min()) / (z.max() - z.min())
xyz = np.zeros((np.size(mesh_x), 3))
xyz[:, 0] = np.reshape(mesh_x, -1)
xyz[:, 1] = np.reshape(mesh_y, -1)
xyz[:, 2] = np.reshape(z_norm, -1)
print('xyz')
print(xyz)

# ## From NumPy to open3d.PointCloud
# `Vector3dVector` converts a NumPy matrix into a vector of 3D vectors that
# can be assigned directly to `open3d.PointCloud.points` (and likewise to
# `.colors` or `.normals`). The cloud is saved as a ply file for the next step.

# Pass xyz to Open3D.o3d.geometry.PointCloud and visualize
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
o3d.io.write_point_cloud("../../TestData/sync.ply", pcd)

# ## From open3d.PointCloud to NumPy
# `pcd_load.points` (of type `Vector3dVector`) converts into a NumPy array
# via `np.asarray`.

# +
# Load saved point cloud and visualize it
pcd_load = o3d.io.read_point_cloud("../../TestData/sync.ply")

# convert Open3D.o3d.geometry.PointCloud to numpy array
xyz_load = np.asarray(pcd_load.points)
print('xyz_load')
print(xyz_load)
o3d.visualization.draw_geometries([pcd_load])
examples/Python/Basic/working_with_numpy.ipynb
# Goal (pun intended): predict whether the home team wins, using the final
# dataset produced by the earlier "Scraping and Cleaning" notebook.

# +
# Import the necessary libraries.
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from IPython.display import display
# %matplotlib inline

# +
# Read data and drop redundant columns.
data = pd.read_csv('../predictions/static/predictions/data/dataset.csv')

# Remove the first 3 matchweeks (rolling form features are undefined there).
data = data[data.MW > 3]

# FIX: pandas 2.0 removed the positional `axis` argument; pass axis=1.
data.drop(['Unnamed: 0', 'HomeTeam', 'AwayTeam', 'Date', 'MW', 'HTFormPtsStr',
           'ATFormPtsStr', 'FTHG', 'FTAG', 'HTGS', 'ATGS', 'HTGC', 'ATGC',
           'HomeTeamLP', 'AwayTeamLP', 'DiffPts', 'HTFormPts', 'ATFormPts',
           'HM4', 'HM5', 'AM4', 'AM5', 'HTLossStreak5', 'ATLossStreak5',
           'HTWinStreak5', 'ATWinStreak5', 'HTWinStreak3', 'HTLossStreak3',
           'ATWinStreak3', 'ATLossStreak3'],
          axis=1, inplace=True)

# Preview data.
display(data.head())
# -

# ## Data Exploration

# +
# Total number of matches.
n_matches = data.shape[0]

# Number of features (all columns except the FTR target).
n_features = data.shape[1] - 1

# Matches won by the home team.
n_homewins = len(data[data.FTR == 'H'])

# Win rate for the home team.
win_rate = (float(n_homewins) / n_matches) * 100

# Print the results
print("Total number of matches: {}".format(n_matches))
print("Number of features: {}".format(n_features))
print("Number of matches won by home team: {}".format(n_homewins))
print("Win rate of home team: {:.2f}%".format(win_rate))

# +
# Visualising distribution of data.
# FIX: pandas.tools.plotting was removed; scatter_matrix now lives in
# pandas.plotting.
from pandas.plotting import scatter_matrix
scatter_matrix(data[['HTGD', 'ATGD', 'HTP', 'ATP', 'DiffFormPts', 'DiffLP']],
               figsize=(10, 10))
# -

# ## Preparing the Data

# +
# Separate into feature set and target variable.
X_all = data.drop(['FTR'], axis=1)
y_all = data['FTR']

# Standardise the continuous features.
from sklearn.preprocessing import scale
cols = ['HTGD', 'ATGD', 'HTP', 'ATP', 'DiffLP']
X_all[cols] = scale(X_all[cols])

# +
# Last-3-matches form columns are categorical; force them to strings so they
# get dummy-encoded below.
for form_col in ['HM1', 'HM2', 'HM3', 'AM1', 'AM2', 'AM3']:
    X_all[form_col] = X_all[form_col].astype('str')


def preprocess_features(X):
    '''Preprocesses the football data and converts categorical variables
    into dummy variables.'''
    # Initialize new output DataFrame
    output = pd.DataFrame(index=X.index)

    # Investigate each feature column for the data.
    # FIX: DataFrame.iteritems was removed in pandas 2.0; use items().
    for col, col_data in X.items():
        # If data type is categorical, convert to dummy variables
        if col_data.dtype == object:
            col_data = pd.get_dummies(col_data, prefix=col)
        # Collect the revised columns
        output = output.join(col_data)
    return output


X_all = preprocess_features(X_all)
print("Processed feature columns ({} total features):\n{}".format(
    len(X_all.columns), list(X_all.columns)))
# -

# Show the feature information by printing the first five rows.
print("\nFeature values:")
display(X_all.head())

# +
from sklearn.model_selection import train_test_split

# Shuffle and split the dataset into training and testing set.
X_train, X_test, y_train, y_test = train_test_split(
    X_all, y_all, test_size=50, random_state=2, stratify=y_all)
# -

# ## Training and Evaluating Models

# +
from time import time
from sklearn.metrics import f1_score


def train_classifier(clf, X_train, y_train):
    ''' Fits a classifier to the training data. '''
    # Time the fit so models can also be compared on training speed.
    start = time()
    clf.fit(X_train, y_train)
    end = time()
    print("Trained model in {:.4f} seconds".format(end - start))


def predict_labels(clf, features, target):
    ''' Makes predictions using a fit classifier based on F1 score. '''
    # Time the prediction pass.
    start = time()
    y_pred = clf.predict(features)
    end = time()
    print("Made predictions in {:.4f} seconds.".format(end - start))
    # Return (F1 on the home-win 'H' label, plain accuracy).
    accuracy = sum(target == y_pred) / float(len(y_pred))
    return f1_score(target, y_pred, pos_label='H'), accuracy


def train_predict(clf, X_train, y_train, X_test, y_test):
    ''' Train and predict using a classifer based on F1 score. '''
    # Indicate the classifier and the training set size
    print("Training a {} using a training set size of {}. . .".format(
        clf.__class__.__name__, len(X_train)))

    # Train the classifier
    train_classifier(clf, X_train, y_train)

    # Print the results of prediction for both training and testing
    f1, acc = predict_labels(clf, X_train, y_train)
    print(f1, acc)
    print("F1 score and accuracy score for training set: "
          "{:.4f} , {:.4f}.".format(f1, acc))
    f1, acc = predict_labels(clf, X_test, y_test)
    print("F1 score and accuracy score for test set: "
          "{:.4f} , {:.4f}.".format(f1, acc))


# +
# Initialize the three models (XGBoost is initialized later)
clf_A = LogisticRegression(random_state=42)
clf_B = SVC(random_state=912, kernel='rbf')
clf_C = xgb.XGBClassifier(seed=82)

# Train and evaluate each candidate in turn.
for candidate in (clf_A, clf_B, clf_C):
    train_predict(candidate, X_train, y_train, X_test, y_test)
    print('')
# -

# **Clearly XGBoost seems like the best model as it has the highest F1 score
# and accuracy score on the test set.**
# # Tuning the parameters of XGBoost.
# +
# Import 'GridSearchCV' and 'make_scorer'
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer

# Hyper-parameter grid to tune (single-value lists pin the chosen settings).
parameters = {
    'learning_rate': [0.1],
    'n_estimators': [40],
    'max_depth': [3],
    'min_child_weight': [3],
    'gamma': [0.4],
    'subsample': [0.8],
    'colsample_bytree': [0.8],
    'scale_pos_weight': [1],
    'reg_alpha': [1e-5],
}

# Initialize the classifier
clf = xgb.XGBClassifier(seed=2)

# Score on F1 of the home-win ('H') label.
f1_scorer = make_scorer(f1_score, pos_label='H')

# 5-fold grid search on the training split; keep the best estimator.
grid_obj = GridSearchCV(clf, scoring=f1_scorer, param_grid=parameters, cv=5)
grid_obj = grid_obj.fit(X_train, y_train)
clf = grid_obj.best_estimator_
print(clf)

# Report the final F1/accuracy for training and test sets after tuning.
f1, acc = predict_labels(clf, X_train, y_train)
print("F1 score and accuracy score for training set: "
      "{:.4f} , {:.4f}.".format(f1, acc))
f1, acc = predict_labels(clf, X_test, y_test)
print("F1 score and accuracy score for test set: "
      "{:.4f} , {:.4f}.".format(f1, acc))
# -

# # Fitting the model on the whole dataset for future predictions.
# +
# Import 'GridSearchCV' and 'make_scorer'
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from pprint import pprint

# Grid for the final model, fit on the FULL dataset for future predictions.
parameters = {
    'learning_rate': [0.03],
    'n_estimators': [20],
    'max_depth': [5],
    'min_child_weight': [5],
    'gamma': [0.2],
    'subsample': [0.8],
    'colsample_bytree': [0.8],
    'scale_pos_weight': [1],
    'reg_alpha': [1e-2],
}

# Initialize the classifier
clf = xgb.XGBClassifier(seed=2)

# Score on F1 of the home-win ('H') label.
f1_scorer = make_scorer(f1_score, pos_label='H')

# 5-fold grid search fitted on all data (X_all/y_all), not just the split.
grid_obj = GridSearchCV(clf, scoring=f1_scorer, param_grid=parameters, cv=5)
grid_obj = grid_obj.fit(X_all, y_all)
clf = grid_obj.best_estimator_
print(clf)

# Report the final F1 score for the training split after parameter tuning.
f1, acc = predict_labels(clf, X_train, y_train)
print("F1 score and accuracy score for training set: "
      "{:.4f} , {:.4f}.".format(f1, acc))
docs/Prediction Engine.ipynb
# # <font color=darkred>Laboratory 15: "It's a Wrap" </font>

# Preamble script block to identify host, user, and kernel
import sys
# ! hostname
# ! whoami
print(sys.executable)
print(sys.version)
print(sys.version_info)

# ## Full name:
# ## R#:
# ## HEX:
# ## Title of the notebook
# ## Date:

# #### Step0- Import the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
import scipy.stats
import seaborn as sns

# #### Step1- Mercury contamination of groundwater: three sets of 20 well
# samples, units "nanograms per liter (ng/l)" of mercury per liter of
# groundwater. Read "lab15_minidf.csv" as a dataframe.
data = pd.read_csv("lab15_minidf.csv")
data

# #### Step2- Let's explore the dataset.
data.info()

# #### Step3- Descriptive statistics: centre of each set's distribution.
set1 = data['Set1']
set2 = data['Set2']
set3 = data['Set3']
for tag, series in zip(('1', '2', '3'), (set1, set2, set3)):
    print('For set ' + tag, ' the arithmetic mean is: ', series.mean())
    print('For set ' + tag, ' the median is: ', series.median())

# #### Step4- Descriptive statistics: spread of each set.
for tag, series in zip(('1', '2', '3'), (set1, set2, set3)):
    print('For set ' + tag, ' the range is: ', np.ptp(series))
    print('For set ' + tag, ' the IQR is: ', scipy.stats.iqr(series))
    print('For set ' + tag, ' the 5-number summary is: ', series.describe())
    print('For set ' + tag, ' the variance is: ', statistics.variance(series))
    print('For set ' + tag, ' the standard deviation is: ',
          statistics.stdev(series))

# #### Step5- Compare the skewness of all sets.
skew1 = set1.skew()
skew2 = set2.skew()
skew3 = set3.skew()
print('For set 1 the skewness is ', skew1,
      'For set 2 the skewness is ', skew2,
      'For set 3 the skewness is ', skew3)

# #### Step6- Boxplots to compare the spread of all sets visually.
fig = plt.figure(figsize=(10, 7))
plt.boxplot([set1, set2, set3], 1, '')
plt.show()

# #### Step7- Histograms to compare the distributions visually.
set1.plot.hist(density=False, bins=6, color="red")
set2.plot.hist(density=False, bins=6, color="blue")
set3.plot.hist(density=False, bins=6, color="gold")

fig, ax = plt.subplots()
data.plot.hist(density=False, ax=ax, bins=6, color=("red", "blue", "gold"))

# #### Step8- Histograms with KDE for the continuous shape of each set.
# NOTE(review): seaborn.distplot is deprecated upstream (histplot/displot
# are the replacements) — confirm the installed seaborn version supports it.
sns.distplot(set1, color='red', rug=True, kde=True)
sns.distplot(set2, color='blue', rug=True, kde=True)
sns.distplot(set3, color='gold', rug=True, kde=True)


# #### Step9- Gringorten plotting positions and a quantile plot per set.
def gringorten_pp(sample):
    """Return Gringorten plotting positions for a numeric sequence.

    CAUTION: sorts `sample` in place (side effect on the caller's data).
    """
    positions = []
    sample.sort()
    for i in range(0, len(sample), 1):
        # Gringorten formula: (i - 0.44) / (n + 0.12) with 1-based rank.
        positions.append((i + 1 - 0.44) / (len(sample) + 0.12))
    return positions


# Apply to each set (converted to arrays; sorted in place by the helper).
set1 = np.array(set1)
set2 = np.array(set2)
set3 = np.array(set3)
set1_grin = gringorten_pp(set1)
set2_grin = gringorten_pp(set2)
set3_grin = gringorten_pp(set3)

myfigure = plt.figure(figsize=(12, 8))
plt.scatter(set1_grin, set1, color='red', marker="^", s=50)
plt.scatter(set2_grin, set2, color='blue', marker="o", s=20)
plt.scatter(set3_grin, set3, color='gold', marker="s", s=20)
plt.xlabel("Density or Quantile Value")
plt.ylabel("Value")
plt.title("Quantile Plot for Set1, Set2, and Set3 "
          "based on Gringorton Plotting Functions")
plt.show()

# #### Step10- Fit Normal, Gumbell (Double Exponential), and Gamma
# distribution data models and find the best alternative for each set.
import math


def normdist(x, mu, sigma):
    """Normal CDF evaluated via the error function."""
    argument = (x - mu) / (math.sqrt(2.0) * sigma)
    return (1.0 + math.erf(argument)) / 2.0


# Normal-distribution fit, one figure per set (same recipe, different colors).
for sample, grin, label, pt_color, line_color in (
        (set1, set1_grin, "Set1", 'red', 'darkred'),
        (set2, set2_grin, "Set2", 'blue', 'darkblue'),
        (set3, set3_grin, "Set3", 'gold', 'orange')):
    mu = sample.mean()        # fitted model parameters
    sigma = sample.std()
    x = []
    ycdf = []
    xlow = 0
    xhigh = 1.2 * max(sample)
    howMany = 100
    xstep = (xhigh - xlow) / howMany
    for i in range(0, howMany + 1, 1):
        x.append(xlow + i * xstep)
        ycdf.append(normdist(xlow + i * xstep, mu, sigma))
    # Plot the sample against its plotting positions plus the fitted CDF.
    myfigure = plt.figure(figsize=(7, 9))
    plt.scatter(grin, sample, color=pt_color)
    plt.plot(ycdf, x, color=line_color)
    plt.xlabel("Quantile Value")
    plt.ylabel("Value")
    mytitle = ("For " + label
               + " | Normal Distribution Data Model sample mean = : "
               + str(mu) + " sample variance =:" + str(sigma ** 2))
    plt.title(mytitle)
    plt.show()


def ev1dist(x, alpha, beta):
    """Gumbel (Extreme Value Type I) CDF."""
    argument = (x - alpha) / beta
    constant = 1.0 / beta  # retained from the original (unused)
    return math.exp(-1.0 * math.exp(-1.0 * argument))


# Gumbel fit with method-of-moments parameters, one figure per set.
# (The original's Set3 cell was mislabelled "For Set2" in a comment.)
for sample, grin, label, pt_color, line_color in (
        (set1, set1_grin, "Set1", 'red', 'darkred'),
        (set2, set2_grin, "Set2", 'blue', 'darkblue'),
        (set3, set3_grin, "Set3", 'gold', 'orange')):
    sample_mean = np.array(sample).mean()
    sample_variance = np.array(sample).std() ** 2
    alpha_mom = sample_mean * math.sqrt(6) / math.pi
    beta_mom = math.sqrt(sample_variance) * 0.45
    mu = sample_mean
    sigma = math.sqrt(sample_variance)
    x = []
    ycdf = []
    xlow = 0
    xhigh = 1.2 * max(sample)
    howMany = 100
    xstep = (xhigh - xlow) / howMany
    for i in range(0, howMany + 1, 1):
        x.append(xlow + i * xstep)
        ycdf.append(ev1dist(xlow + i * xstep, alpha_mom, beta_mom))
    myfigure = plt.figure(figsize=(7, 8))
    plt.scatter(grin, sample, color=pt_color)
    plt.plot(ycdf, x, color=line_color)
    plt.xlabel("Quantile Value")
    plt.ylabel("Value")
    mytitle = ("For " + label
               + " | Extreme Value Type 1 Distribution Data Model "
                 "sample mean = : " + str(sample_mean)
               + " sample variance =:" + str(sample_variance))
    plt.title(mytitle)
    plt.show()


def gammacdf(x, tau, alpha, beta):
    """Pearson Type III (shifted gamma) CDF via the one-parameter scipy gamma."""
    xhat = x - tau
    lamda = 1.0 / beta
    return scipy.stats.gamma.cdf(lamda * xhat, alpha)


# Pearson Type III fit per set, moments-based parameters from mean/sd/skew.
# Sets 1 and 2 plot the REVERSED cdf; the original left Set3 un-reversed
# (the reversal line was commented out) — preserved here.
# NOTE(review): confirm the Set3 asymmetry is intentional.
for sample, grin, label, pt_color, line_color, reverse_cdf in (
        (set1, set1_grin, "Set1", 'red', 'darkred', True),
        (set2, set2_grin, "Set2", 'blue', 'darkblue', True),
        (set3, set3_grin, "Set3", 'gold', 'orange', False)):
    s_mean = np.array(sample).mean()
    s_stdev = np.array(sample).std()
    s_skew = scipy.stats.skew(sample)
    s_alpha = 4.0 / (s_skew ** 2)
    s_beta = np.sign(s_skew) * math.sqrt(s_stdev ** 2 / s_alpha)
    s_tau = s_mean - s_alpha * s_beta
    x = []
    ycdf = []
    xlow = 0.9 * min(sample)
    xhigh = 1.1 * max(sample)
    howMany = 100
    xstep = (xhigh - xlow) / howMany
    for i in range(0, howMany + 1, 1):
        x.append(xlow + i * xstep)
        ycdf.append(gammacdf(xlow + i * xstep, s_tau, s_alpha, s_beta))
    curve = ycdf[::-1] if reverse_cdf else ycdf
    myfigure = plt.figure(figsize=(7, 8))
    plt.scatter(grin, sample, color=pt_color)
    plt.plot(curve, x, color=line_color)
    plt.xlabel("Quantile Value")
    plt.ylabel("Value")
    mytitle = ("For " + label
               + " | Pearson (Gamma) Type III Distribution Data Model\n ")
    mytitle += "Mean = " + str(s_mean) + "\n"
    mytitle += "SD = " + str(s_stdev) + "\n"
    mytitle += "Skew = " + str(s_skew) + "\n"
    plt.title(mytitle)
    plt.show()

# #### Step11- Visually, Normal fits Set1 and Set2 and Gamma fits Set3
# better. Run normality hypothesis tests on each set.

# The Shapiro-Wilk Normality Test for each set.
from scipy.stats import shapiro
for sample in (set1, set2, set3):
    stat, p = shapiro(sample)
    print('stat=%.3f, p=%.3f' % (stat, p))
    if p > 0.05:
        print('Probably Gaussian')
    else:
        print('Probably not Gaussian')

# #### Step13- Run hypothesis tests to decide whether the three sets are
# significantly different or not.
# +
# Pairwise Student's t-tests between the three sets.
from scipy.stats import ttest_ind
for left, right in ((set1, set2), (set1, set3), (set2, set3)):
    stat, p = ttest_ind(left, right)
    print('stat=%.3f, p=%.3f' % (stat, p))
    if p > 0.05:
        print('Probably the same distribution')
    else:
        print('Probably different distributions')
# -

# #### Step14- A monitoring device on each well samples ~28 times per hour.
# After a month the log is brought to the lab; read "lab15_maxidf.csv" as a
# dataframe.
data = pd.read_csv("lab15_maxidf.csv")
data

# #### Step15- Let's explore the dataset.
data.info()

# #### Step16- Descriptive statistics: centre of each set's distribution.
set1 = data['SetA']
set2 = data['SetB']
set3 = data['SetC']
for tag, series in zip(('1', '2', '3'), (set1, set2, set3)):
    print('For set ' + tag, ' the arithmetic mean is: ', series.mean())
    print('For set ' + tag, ' the median is: ', series.median())

# #### Step17- Descriptive statistics: spread of each set.
for tag, series in zip(('1', '2', '3'), (set1, set2, set3)):
    print('For set ' + tag, ' the range is: ', np.ptp(series))
    print('For set ' + tag, ' the IQR is: ', scipy.stats.iqr(series))
    print('For set ' + tag, ' the 5-number summary is: ', series.describe())
    print('For set ' + tag, ' the variance is: ', statistics.variance(series))
    print('For set ' + tag, ' the standard deviation is: ',
          statistics.stdev(series))

# #### Step18- Compare the skewness of all sets.
skew1 = set1.skew()
skew2 = set2.skew()
skew3 = set3.skew()
print('For set 1 the skewness is ', skew1,
      'For set 2 the skewness is ', skew2,
      'For set 3 the skewness is ', skew3)

# #### Step19- Boxplots to compare the spread of all sets visually.
fig = plt.figure(figsize=(10, 7))
plt.boxplot([set1, set2, set3], 1, '')
plt.show()

# #### Step20- Histograms (finer bins for the larger monitoring dataset).
set1.plot.hist(density=False, bins=50, color="red")
set2.plot.hist(density=False, bins=50, color="blue")
set3.plot.hist(density=False, bins=50, color="gold")

fig, ax = plt.subplots()
data.plot.hist(density=False, ax=ax, bins=50, color=("red", "blue", "gold"))

# #### Step21- Histograms with KDE for the continuous shape of each set.
sns.distplot(set1, color='red', rug=True, kde=True)
sns.distplot(set2, color='blue', rug=True, kde=True)
sns.distplot(set3, color='gold', rug=True, kde=True)


# #### Step22- Gringorten plotting positions and a quantile plot per set.
# Redefined here exactly as in Step9 of the original notebook.
def gringorten_pp(sample):
    """Return Gringorten plotting positions; sorts `sample` in place."""
    positions = []
    sample.sort()
    for i in range(0, len(sample), 1):
        positions.append((i + 1 - 0.44) / (len(sample) + 0.12))
    return positions


set1 = np.array(set1)
set2 = np.array(set2)
set3 = np.array(set3)
set1_grin = gringorten_pp(set1)
set2_grin = gringorten_pp(set2)
set3_grin = gringorten_pp(set3)

myfigure = plt.figure(figsize=(12, 8))
plt.scatter(set1_grin, set1, color='red', marker="^", s=50)
plt.scatter(set2_grin, set2, color='blue', marker="o", s=20)
plt.scatter(set3_grin, set3, color='gold', marker="s", s=20)
plt.xlabel("Density or Quantile Value")
plt.ylabel("Value")
plt.title("Quantile Plot for Set1, Set2, and Set3 "
          "based on Gringorton Plotting Functions")
plt.show()

# #### Step23- Fit Normal, Gumbell (Double Exponential), and Gamma
# distribution data models and find the best alternative for each set.
import math


def normdist(x, mu, sigma):
    """Normal CDF evaluated via the error function."""
    argument = (x - mu) / (math.sqrt(2.0) * sigma)
    return (1.0 + math.erf(argument)) / 2.0


# Normal fit for Set1 and Set2 (full figure each).
for sample, grin, label, pt_color, line_color in (
        (set1, set1_grin, "Set1", 'red', 'darkred'),
        (set2, set2_grin, "Set2", 'blue', 'darkblue')):
    mu = sample.mean()
    sigma = sample.std()
    x = []
    ycdf = []
    xlow = 0
    xhigh = 1.2 * max(sample)
    howMany = 100
    xstep = (xhigh - xlow) / howMany
    for i in range(0, howMany + 1, 1):
        x.append(xlow + i * xstep)
        ycdf.append(normdist(xlow + i * xstep, mu, sigma))
    myfigure = plt.figure(figsize=(7, 9))
    plt.scatter(grin, sample, color=pt_color)
    plt.plot(ycdf, x, color=line_color)
    plt.xlabel("Quantile Value")
    plt.ylabel("Value")
    mytitle = ("For " + label
               + " | Normal Distribution Data Model sample mean = : "
               + str(mu) + " sample variance =:" + str(sigma ** 2))
    plt.title(mytitle)
    plt.show()

# For Set3 compute the fitted CDF; the globals mu, sigma, x, ycdf are left
# holding Set3's values because the plotting code that consumes them
# continues in the next (truncated) cell of this notebook.
mu = set3.mean()
sigma = set3.std()
x = []
ycdf = []
xlow = 0
xhigh = 1.2 * max(set3)
howMany = 100
xstep = (xhigh - xlow) / howMany
for i in range(0, howMany + 1, 1):
    x.append(xlow + i * xstep)
    ycdf.append(normdist(xlow + i * xstep, mu, sigma))
# Fitting Data to Normal Data Model — now plot the
sample values and plotting position myfigure = plt.figure(figsize = (7,9)) # generate a object from the figure class, set aspect ratio plt.scatter(set3_grin, set3 ,color ='gold') plt.plot(ycdf, x, color ='orange') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set3 | Normal Distribution Data Model sample mean = : " + str(mu)+ " sample variance =:" + str(sigma**2) plt.title(mytitle) plt.show() # + # Gumbell (Extreme Value Type I) Quantile Function def ev1dist(x,alpha,beta): argument = (x - alpha)/beta constant = 1.0/beta ev1dist = math.exp(-1.0*math.exp(-1.0*argument)) return ev1dist # + #For Set1 sample = set1 sample_mean = np.array(sample).mean() sample_variance = np.array(sample).std()**2 alpha_mom = sample_mean*math.sqrt(6)/math.pi beta_mom = math.sqrt(sample_variance)*0.45 ################ mu = sample_mean # Fitted Model sigma = math.sqrt(sample_variance) x = []; ycdf = [] xlow = 0; xhigh = 1.2*max(sample) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow + i*xstep) yvalue = ev1dist(xlow + i*xstep,alpha_mom,beta_mom) ycdf.append(yvalue) # Now plot the sample values and plotting position myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set1_grin, set1 ,color ='red') plt.plot(ycdf, x, color ='darkred') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set1 | Extreme Value Type 1 Distribution Data Model sample mean = : " + str(sample_mean)+ " sample variance =:" + str(sample_variance) plt.title(mytitle) plt.show() # + #For Set2 sample = set2 sample_mean = np.array(sample).mean() sample_variance = np.array(sample).std()**2 alpha_mom = sample_mean*math.sqrt(6)/math.pi beta_mom = math.sqrt(sample_variance)*0.45 ################ mu = sample_mean # Fitted Model sigma = math.sqrt(sample_variance) x = []; ycdf = [] xlow = 0; xhigh = 1.2*max(sample) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow 
+ i*xstep) yvalue = ev1dist(xlow + i*xstep,alpha_mom,beta_mom) ycdf.append(yvalue) # Now plot the sample values and plotting position myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set2_grin, set2 ,color ='blue') plt.plot(ycdf, x, color ='darkblue') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set2 | Extreme Value Type 1 Distribution Data Model sample mean = : " + str(sample_mean)+ " sample variance =:" + str(sample_variance) plt.title(mytitle) plt.show() # + #For Set2 sample = set3 sample_mean = np.array(sample).mean() sample_variance = np.array(sample).std()**2 alpha_mom = sample_mean*math.sqrt(6)/math.pi beta_mom = math.sqrt(sample_variance)*0.45 ################ mu = sample_mean # Fitted Model sigma = math.sqrt(sample_variance) x = []; ycdf = [] xlow = 0; xhigh = 1.2*max(sample) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow + i*xstep) yvalue = ev1dist(xlow + i*xstep,alpha_mom,beta_mom) ycdf.append(yvalue) # Now plot the sample values and plotting position myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set3_grin, set3 ,color ='gold') plt.plot(ycdf, x, color ='orange') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set3 | Extreme Value Type 1 Distribution Data Model sample mean = : " + str(sample_mean)+ " sample variance =:" + str(sample_variance) plt.title(mytitle) plt.show() # - # Gamma (Pearson Type III) Quantile Function def gammacdf(x,tau,alpha,beta): # Gamma Cumulative Density function - with three parameter to one parameter convert xhat = x-tau lamda = 1.0/beta gammacdf = scipy.stats.gamma.cdf(lamda*xhat, alpha) return gammacdf # + #For Set1 set1_mean = np.array(set1).mean() set1_stdev = np.array(set1).std() set1_skew = scipy.stats.skew(set1) set1_alpha = 4.0/(set1_skew**2) set1_beta = np.sign(set1_skew)*math.sqrt(set1_stdev**2/set1_alpha) set1_tau = 
set1_mean - set1_alpha*set1_beta # x = []; ycdf = [] xlow = (0.9*min(set1)); xhigh = (1.1*max(set1)) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow + i*xstep) yvalue = gammacdf(xlow + i*xstep,set1_tau,set1_alpha,set1_beta) ycdf.append(yvalue) #### #rycdf = ycdf[::-1] myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set1_grin, set1 ,color ='red') plt.plot(ycdf, x, color ='darkred') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set1 | Pearson (Gamma) Type III Distribution Data Model\n " mytitle += "Mean = " + str(set1_mean) + "\n" mytitle += "SD = " + str(set1_stdev) + "\n" mytitle += "Skew = " + str(set1_skew) + "\n" plt.title(mytitle) plt.show() # + #For Set2 set2_mean = np.array(set2).mean() set2_stdev = np.array(set2).std() set2_skew = scipy.stats.skew(set2) set2_alpha = 4.0/(set2_skew**2) set2_beta = np.sign(set2_skew)*math.sqrt(set2_stdev**2/set2_alpha) set2_tau = set2_mean - set2_alpha*set2_beta # x = []; ycdf = [] xlow = (0.9*min(set2)); xhigh = (1.1*max(set2)) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow + i*xstep) yvalue = gammacdf(xlow + i*xstep,set2_tau,set2_alpha,set2_beta) ycdf.append(yvalue) #### #rycdf = ycdf[::-1] myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set2_grin, set2 ,color ='blue') plt.plot(ycdf, x, color ='darkblue') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set2 | Pearson (Gamma) Type III Distribution Data Model\n " mytitle += "Mean = " + str(set2_mean) + "\n" mytitle += "SD = " + str(set2_stdev) + "\n" mytitle += "Skew = " + str(set2_skew) + "\n" plt.title(mytitle) plt.show() # + #For Set3 set3_mean = np.array(set3).mean() set3_stdev = np.array(set3).std() set3_skew = scipy.stats.skew(set3) set3_alpha = 4.0/(set3_skew**2) set3_beta = 
np.sign(set3_skew)*math.sqrt(set3_stdev**2/set3_alpha) set3_tau = set3_mean - set3_alpha*set3_beta # x = []; ycdf = [] xlow = (0.9*min(set3)); xhigh = (1.1*max(set3)) ; howMany = 100 xstep = (xhigh - xlow)/howMany for i in range(0,howMany+1,1): x.append(xlow + i*xstep) yvalue = gammacdf(xlow + i*xstep,set3_tau,set3_alpha,set3_beta) ycdf.append(yvalue) #### #rycdf = ycdf[::-1] myfigure = plt.figure(figsize = (7,8)) # generate a object from the figure class, set aspect ratio plt.scatter(set3_grin, set3 ,color ='gold') plt.plot(ycdf, x, color ='orange') plt.xlabel("Quantile Value") plt.ylabel("Value") mytitle = "For Set3 | Pearson (Gamma) Type III Distribution Data Model\n " mytitle += "Mean = " + str(set3_mean) + "\n" mytitle += "SD = " + str(set3_stdev) + "\n" mytitle += "Skew = " + str(set3_skew) + "\n" plt.title(mytitle) plt.show() # - # #### Step24- Run appropriate hypothesis tests and decide whether each set of samples has a normal disctribution or not. # The Shapiro-Wilk Normality Test for Set1 from scipy.stats import shapiro stat, p = shapiro(set1) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably Gaussian') else: print('Probably not Gaussian') # The Shapiro-Wilk Normality Test for Set2 from scipy.stats import shapiro stat, p = shapiro(set2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably Gaussian') else: print('Probably not Gaussian') # The Shapiro-Wilk Normality Test for Set3 from scipy.stats import shapiro stat, p = shapiro(set3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably Gaussian') else: print('Probably not Gaussian') # #### Step25- Run appropriate hypothesis tests and decide whether the three sets are significantly different or not. 
# + # The Student's t-test for Set1 and Set2 from scipy.stats import ttest_ind stat, p = ttest_ind(set1, set2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') # + # The Student's t-test for Set1 and Set3 from scipy.stats import ttest_ind stat, p = ttest_ind(set1, set3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') # + # The Student's t-test for Set2 and Set3 from scipy.stats import ttest_ind stat, p = ttest_ind(set2, set3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') # - # Example of the Analysis of Variance Test from scipy.stats import f_oneway stat, p = f_oneway(set1, set2, set3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') # ___ # ![](https://media2.giphy.com/media/dNgK7Ws7y176U/200.gif) <br> # # ## Exercise: Normality... who cares? <br> # # ### Why should we check data for normality? # # #### _Make sure to cite any resources that you may use._ # ![](https://images.squarespace-cdn.com/content/v1/5cdb2d33b7c92ca1155717df/1573586997392-V8FS2GE2ZBZ62R7QY0CO/ke17ZwdGBToddI8pDm48kMgKQE9tC-JTZlso7Pn8G1RZw-zPPgdn4jUwVcJE1ZvWQUxwkmyExglNqGp0IvTJZUJFbgE-7XRK3dMEBRBhUpwKilRWyPemczd8jMYzGiN1wzQyIb5hPcMsyZ8ZzLIg-21sxJ0kGaG-pkVsVty1_QM/that%27s+all+folks.jpeg)
8-Labs/Z-Spring2021/Lab15/Lab15_Dev.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Eigendecomposition
#
# - $A = V\Lambda$$V^T$
# - square matrix
# - $Au = \lambda{u}$
# -----
# ### Square matrix
#
#
# - $A\in{R}^{N\times{M}}$
#
# - $\lambda$ is complex in general
# - there are $N$ eigenvalues $\lambda$
# - $AV = V\Lambda$
# - $tr A = \Sigma_{i=1}^{N}\lambda_i$
# - $det A = \Pi_{i=1}^{N}\lambda_i$
#
# ----
# ### Symmetric matrix
#
# - $A = A^T$
# - $\lambda$ is real
# - $V^T = V^{-1}$
# - $A = V\Lambda{V}^T$
# - $A = \Sigma_{i=1}^N\lambda_iV_iV_i^T$
#
# ----
# ### Scatter (covariance) matrix
#
# - $A = X^TX$
# - every eigenvalue is zero or positive, $\lambda_i \geq 0$
#
# ----
# ### Positive definite (PD)
#
# - $X$ has full rank
# - the inverse exists
# - every eigenvalue is positive, $\lambda_i \geq 0$
# ----
# ### Singular value decomposition
#
# - $A = U\Sigma$$V^T$
#
# - from numpy.linalg import svd
# - A = np.array([[3,-1],[1,3],[1,1]])
# - U, S, VT = svd(A)
# - svd(A, full_matrices=False) $\rightarrow$ reduced form
# - $Au = \sigma{u}$
#
# ----
#
# #### Rank-K approximation problem
#
# - $ arg min \|A-AWW^T\|$

# ### PCA (Principal Component Analysis)
#
# - for high-dimensional data
#
# $\rightarrow$ i.e. the vectors that make up the data have many dimensions
# ----
# #### The PCA interface in scikit-learn
#
# - input argument : n_components : integer
#
# - methods : fit_transform() : transform the feature matrix into a lower-dimensional approximation
#             inverse_transform() : map the transformed approximation back to the original dimension
#
# - attributes : mean_ : mean vector
#                components_ : principal-component vectors

# +
# load iris data
from sklearn.datasets import load_iris
# -

import warnings
warnings.filterwarnings("ignore")

iris = load_iris()

N = 10  # select only the first 10 flowers
X = iris.data[:N, :2]  # select only sepal length and sepal width
X

# +
# PCA
from sklearn.decomposition import PCA
# -

pca1 = PCA(n_components=1)
X_low = pca1.fit_transform(X)

# +
# latent variable
# negative values appear because the origin has been shifted (data is centered)
X_low
# -

X2 = pca1.inverse_transform(X_low)

# +
# projection back into the original (low-rank) space
X2
# -

# NOTE(review): positional x/y arguments with `data=` rely on an old seaborn
# API — newer seaborn versions require keyword arguments; confirm the pinned
# seaborn version before running.
plt.figure(figsize=(7, 7))
ax = sns.scatterplot(0, 1, data=pd.DataFrame(X), s=100, color=".2", marker="s")
for i in range(N):
    # offset the label above or below the point depending on which side of
    # the projection line the sample falls on
    d = 0.03 if X[i, 1] > X2[i, 1] else -0.04
    ax.text(X[i, 0] - 0.065, X[i, 1] + d, "표본 {}".format(i + 1))
    plt.plot([X[i, 0], X2[i, 0]], [X[i, 1], X2[i, 1]], "k--")
plt.plot(X2[:, 0], X2[:, 1], "o-", markersize=10)
plt.plot(X[:, 0].mean(), X[:, 1].mean(), markersize=10, marker="D")
plt.axvline(X[:, 0].mean(), c='r')
plt.axhline(X[:, 1].mean(), c='r')
plt.grid(False)
plt.xlabel("꽃받침 길이")
plt.ylabel("꽃받침 폭")
plt.title("Iris 데이터의 1차원 차원축소")
plt.axis("equal")
plt.show()

# +
# the center (mean) location of the data
pca1.mean_

# +
# unit vector giving the direction of the (blue) basis vector
pca1.components_
# -

X0 = X - X.mean(axis=0)
U, S, VT = np.linalg.svd(X0)
VT

VT[:, 0]

# #### identical to the eigenvector of the scatter matrix
XCOV = X0.T @ X0
W, V = np.linalg.eig(XCOV)
W

V

V[:, np.argmax(W)]

# #### The sepal length and sepal width of the 8th flower are as follows.
X[7, :]

# +
# principal-component score of that flower
X_low[7]
# -

X2[7, :]

# ----
#
# ### Exercise 3.5.1
#
# - For the first 50 rows of the iris data (the setosa samples), solve the
#   following.  (NOTE(review): the code below actually uses N = 10 rows and,
#   in part 1, the first two columns — confirm against the original exercise.)
#
# 1) Perform a 1-D PCA using petal length and petal width.  What linear
#    combination of sepal length and sepal width represents the flower's size?
N = 10
X = iris.data[:N, :2]
X

pca3 = PCA(n_components=1)
X_low = pca3.fit_transform(X)
X_low

X2 = pca3.inverse_transform(X_low)
X2

# 2) Perform a 1-D PCA using all four variables — sepal length/width and
#    petal length/width.  What linear combination of the observed data
#    represents the flower's size?
N = 10
X = iris.data[:N, :]
X

pca2 = PCA(n_components=1)
X_low = pca2.fit_transform(X)
X_low

X2 = pca2.inverse_transform(X_low)
X2

# #### Why learn PCA?
#
# - Having more data (samples) is better than having more kinds of variables.
# - As the columns approach linear dependence, the inverse hardly exists.
#
# $\rightarrow$ some of the data has to be discarded (feature selection)
#
# * This is where PCA helps!!
# $\rightarrow$ it compresses the data into a lower-dimensional representation.

# -----
# ### Image PCA
from sklearn.datasets import fetch_olivetti_faces

faces_all = fetch_olivetti_faces()
K = 15  # select only the photos of one person
        # NOTE(review): the original comment said "person 20" but K is 15 — confirm.

faces = faces_all.images[faces_all.target == K]

N = 2
M = 5
fig = plt.figure(figsize=(10, 5))
plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05)
for i in range(N):
    for j in range(M):
        k = i * M + j
        ax = fig.add_subplot(N, M, k+1)
        ax.imshow(faces[k], cmap=plt.cm.bone)
        ax.grid(False)
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])
plt.suptitle("olivetti_face image")
plt.tight_layout()
plt.show()

faces.shape

64*64

pca4 = PCA(n_components=2)
X3 = faces_all.data[faces_all.target == K]
W3 = pca4.fit_transform(X3)
X32 = pca4.inverse_transform(W3)
X32

# +
# eigenface
faces_all = fetch_olivetti_faces()
K = 8
# -

faces = faces_all.images[faces_all.target == K]

N = 2
M = 5
fig = plt.figure(figsize=(10, 5))
plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05)
for i in range(N):
    for j in range(M):
        k = i * M + j
        ax = fig.add_subplot(N, M, k+1)
        ax.imshow(faces[k], cmap=plt.cm.bone)
        ax.grid(False)
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])
plt.suptitle("olivetti_face image")
plt.tight_layout()
plt.show()

# ---
MATH/05_PCA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/missing-values).** # # --- # # Now it's your turn to test your new knowledge of **missing values** handling. You'll probably find it makes a big difference. # # # Setup # # The questions will give you feedback on your work. Run the following cell to set up the feedback system. # Set up code checking import os if not os.path.exists("../input/train.csv"): os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv") os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv") from learntools.core import binder binder.bind(globals()) from learntools.ml_intermediate.ex2 import * print("Setup Complete") # In this exercise, you will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course). # # ![Ames Housing dataset image](https://i.imgur.com/lTJVG4e.png) # # Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`. 
# + import pandas as pd from sklearn.model_selection import train_test_split # Read the data X_full = pd.read_csv('../input/train.csv', index_col='Id') X_test_full = pd.read_csv('../input/test.csv', index_col='Id') # Remove rows with missing target, separate target from predictors X_full.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X_full.SalePrice X_full.drop(['SalePrice'], axis=1, inplace=True) # To keep things simple, we'll use only numerical predictors X = X_full.select_dtypes(exclude=['object']) X_test = X_test_full.select_dtypes(exclude=['object']) # Break off validation set from training data X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) # - # Use the next code cell to print the first five rows of the data. X_train.head() # You can already see a few missing values in the first several rows. In the next step, you'll obtain a more comprehensive understanding of the missing values in the dataset. # # # Step 1: Preliminary investigation # # Run the code cell below without changes. # + # Shape of training data (num_rows, num_columns) print(X_train.shape) # Number of missing values in each column of training data missing_val_count_by_column = (X_train.isnull().sum()) print(missing_val_count_by_column[missing_val_count_by_column > 0]) # - # ### Part A # # Use the above output to answer the questions below. # + # Fill in the line below: How many rows are in the training data? num_rows = 1168 # Fill in the line below: How many columns in the training data # have missing values? num_cols_with_missing = 3 # Fill in the line below: How many missing entries are contained in # all of the training data? tot_missing = 276 # Check your answers step_1.a.check() # + # Lines below will give you a hint or solution code #step_1.a.hint() #step_1.a.solution() # - # ### Part B # Considering your answers above, what do you think is likely the best approach to dealing with the missing values? 
# Check your answer (Run this code cell to receive credit!) step_1.b.check() # + #step_1.b.hint() # - # To compare different approaches to dealing with missing values, you'll use the same `score_dataset()` function from the tutorial. This function reports the [mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) (MAE) from a random forest model. # + from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error # Function for comparing different approaches def score_dataset(X_train, X_valid, y_train, y_valid): model = RandomForestRegressor(n_estimators=100, random_state=0) model.fit(X_train, y_train) preds = model.predict(X_valid) return mean_absolute_error(y_valid, preds) # - # # Step 2: Drop columns with missing values # # In this step, you'll preprocess the data in `X_train` and `X_valid` to remove columns with missing values. Set the preprocessed DataFrames to `reduced_X_train` and `reduced_X_valid`, respectively. # + # Fill in the line below: get names of columns with missing values cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()] # Your code here # Fill in the lines below: drop columns in training and validation data reduced_X_train = X_train.drop(cols_with_missing, axis=1) reduced_X_valid = X_valid.drop(cols_with_missing, axis=1) # Check your answers step_2.check() # + # Lines below will give you a hint or solution code #step_2.hint() #step_2.solution() # - # Run the next code cell without changes to obtain the MAE for this approach. print("MAE (Drop columns with missing values):") print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid)) # # Step 3: Imputation # # ### Part A # # Use the next code cell to impute missing values with the mean value along each column. Set the preprocessed DataFrames to `imputed_X_train` and `imputed_X_valid`. Make sure that the column names match those in `X_train` and `X_valid`. 
# + from sklearn.impute import SimpleImputer # Fill in the lines below: imputation my_imputer = SimpleImputer() # Your code here imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train)) imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid)) # Fill in the lines below: imputation removed column names; put them back imputed_X_train.columns = X_train.columns imputed_X_valid.columns = X_valid.columns # Check your answers step_3.a.check() # + # Lines below will give you a hint or solution code #step_3.a.hint() #step_3.a.solution() # - # Run the next code cell without changes to obtain the MAE for this approach. print("MAE (Imputation):") print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid)) # ### Part B # # Compare the MAE from each approach. Does anything surprise you about the results? Why do you think one approach performed better than the other? # Check your answer (Run this code cell to receive credit!) step_3.b.check() # + #step_3.b.hint() # - # # Step 4: Generate test predictions # # In this final step, you'll use any approach of your choosing to deal with missing values. Once you've preprocessed the training and validation features, you'll train and evaluate a random forest model. Then, you'll preprocess the test data before generating predictions that can be submitted to the competition! # # ### Part A # # Use the next code cell to preprocess the training and validation data. Set the preprocessed DataFrames to `final_X_train` and `final_X_valid`. **You can use any approach of your choosing here!** in order for this step to be marked as correct, you need only ensure: # - the preprocessed DataFrames have the same number of columns, # - the preprocessed DataFrames have no missing values, # - `final_X_train` and `y_train` have the same number of rows, and # - `final_X_valid` and `y_valid` have the same number of rows. 
# + # Preprocessed training and validation features final_imputer = SimpleImputer(strategy='median') final_X_train = pd.DataFrame(final_imputer.fit_transform(X_train)) final_X_valid = pd.DataFrame(final_imputer.transform(X_valid)) final_X_train.columns = X_train.columns final_X_valid.columns = X_valid.columns # Check your answers step_4.a.check() # + # Lines below will give you a hint or solution code #step_4.a.hint() #step_4.a.solution() # - # Run the next code cell to train and evaluate a random forest model. (*Note that we don't use the `score_dataset()` function above, because we will soon use the trained model to generate test predictions!*) # + # Define and fit model model = RandomForestRegressor(n_estimators=100, random_state=0) model.fit(final_X_train, y_train) # Get validation predictions and MAE preds_valid = model.predict(final_X_valid) print("MAE (Your approach):") print(mean_absolute_error(y_valid, preds_valid)) # - # ### Part B # # Use the next code cell to preprocess your test data. Make sure that you use a method that agrees with how you preprocessed the training and validation data, and set the preprocessed test features to `final_X_test`. # # Then, use the preprocessed test features and the trained model to generate test predictions in `preds_test`. # # In order for this step to be marked correct, you need only ensure: # - the preprocessed test DataFrame has no missing values, and # - `final_X_test` has the same number of rows as `X_test`. # + # Fill in the line below: preprocess test data final_X_test = pd.DataFrame(final_imputer.transform(X_test)) # Fill in the line below: get test predictions preds_test = model.predict(final_X_test) # Check your answers step_4.b.check() # + # Lines below will give you a hint or solution code #step_4.b.hint() #step_4.b.solution() # - # Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition. 
# Save test predictions to file output = pd.DataFrame({'Id': X_test.index, 'SalePrice': preds_test}) output.to_csv('submission.csv', index=False) # # Submit your results # # Once you have successfully completed Step 4, you're ready to submit your results to the leaderboard! (_You also learned how to do this in the previous exercise. If you need a reminder of how to do this, please use the instructions below._) # # First, you'll need to join the competition if you haven't already. So open a new window by clicking on [this link](https://www.kaggle.com/c/home-data-for-ml-course). Then click on the **Join Competition** button. # # ![join competition image](https://i.imgur.com/wLmFtH3.png) # # Next, follow the instructions below: # 1. Begin by clicking on the blue **Save Version** button in the top right corner of the window. This will generate a pop-up window. # 2. Ensure that the **Save and Run All** option is selected, and then click on the blue **Save** button. # 3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions. # 4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard. # # You have now successfully submitted to the competition! # # If you want to keep working to improve your performance, select the blue **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work. 
# # # # Keep going # # Move on to learn what **[categorical variables](https://www.kaggle.com/alexisbcook/categorical-variables)** are, along with how to incorporate them into your machine learning models. Categorical variables are very common in real-world data, but you'll get an error if you try to plug them into your models without processing them first! # --- # # # # # *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161289) to chat with other Learners.*
Kaggle Courses/Intermediate Machine Learning/exercise-missing-values.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction
#
# In this exercise you'll apply more advanced encodings to encode the categorical variables to improve your classifier model. The encodings you will implement are:
#
# - Count Encoding
# - Target Encoding
# - CatBoost Encoding
#
# You'll refit the classifier after each encoding to check its performance on hold-out data.
#
# Begin by running the next code cell to set up the notebook.

# Set up code checking
# This can take a few seconds
from learntools.core import binder
binder.bind(globals())
from learntools.feature_engineering.ex2 import *

# The next code cell repeats the work that you did in the previous exercise.

# +
import numpy as np
import pandas as pd
from sklearn import preprocessing, metrics
import lightgbm as lgb

clicks = pd.read_parquet('../input/feature-engineering-data/baseline_data.pqt')
# -

# Next, we define a couple functions that you'll use to test the encodings that you implement in this exercise.

# +
def get_data_splits(dataframe, valid_fraction=0.1):
    """Splits a dataframe into train, validation, and test sets.

    First, orders by the column 'click_time'. Set the size of the
    validation and test sets with the valid_fraction keyword argument.
    """
    # Time-ordered split: validation and test are the most recent rows,
    # which avoids leaking future information into training.
    dataframe = dataframe.sort_values('click_time')
    valid_rows = int(len(dataframe) * valid_fraction)
    train = dataframe[:-valid_rows * 2]
    # valid size == test size, last two sections of the data
    valid = dataframe[-valid_rows * 2:-valid_rows]
    test = dataframe[-valid_rows:]

    return train, valid, test

def train_model(train, valid, test=None, feature_cols=None):
    # Train a LightGBM binary classifier and report validation AUC.
    # If `test` is given, its AUC is computed as well and returned.
    # NOTE(review): `early_stopping_rounds` / `verbose_eval` are keyword
    # arguments of older LightGBM releases; newer versions moved these to
    # callbacks — confirm the pinned lightgbm version.
    if feature_cols is None:
        feature_cols = train.columns.drop(['click_time', 'attributed_time',
                                           'is_attributed'])
    dtrain = lgb.Dataset(train[feature_cols], label=train['is_attributed'])
    dvalid = lgb.Dataset(valid[feature_cols], label=valid['is_attributed'])

    param = {'num_leaves': 64, 'objective': 'binary',
             'metric': 'auc', 'seed': 7}
    num_round = 1000
    bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid],
                    early_stopping_rounds=20, verbose_eval=False)

    valid_pred = bst.predict(valid[feature_cols])
    valid_score = metrics.roc_auc_score(valid['is_attributed'], valid_pred)
    print(f"Validation AUC score: {valid_score}")

    if test is not None:
        test_pred = bst.predict(test[feature_cols])
        test_score = metrics.roc_auc_score(test['is_attributed'], test_pred)
        return bst, valid_score, test_score
    else:
        return bst, valid_score
# -

# Run this cell to get a baseline score.
print("Baseline model")
train, valid, test = get_data_splits(clicks)
_ = train_model(train, valid)

# ### 1) Categorical encodings and leakage
#
# These encodings are all based on statistics calculated from the dataset like counts and means.
#
# Considering this, what data should you be using to calculate the encodings?  Specifically, can you use the validation data?  Can you use the test data?
#
# Run the following line after you've decided your answer.

# Check your answer (Run this code cell to receive credit!)
q_1.solution()

# ### 2) Count encodings
#
# Begin by running the next code cell to get started.

# +
import category_encoders as ce

cat_features = ['ip', 'app', 'device', 'os', 'channel']
train, valid, test = get_data_splits(clicks)
# -

# Next, encode the categorical features `['ip', 'app', 'device', 'os', 'channel']` using the count of each value in the data set.
# - Using `CountEncoder` from the `category_encoders` library, fit the encoding using the categorical feature columns defined in `cat_features`.
# - Then apply the encodings to the train and validation sets, adding them as new columns with names suffixed `"_count"`.

# +
# Create the count encoder
count_enc = ____

# Learn encoding from the training set
____

# Apply encoding to the train and validation sets as new columns
# Make sure to add `_count` as a suffix to the new columns
train_encoded = ____
valid_encoded = ____

# Check your answer
q_2.check()

# +
# Uncomment if you need some guidance
# q_2.hint()
# q_2.solution()

# +
# #%%RM_IF(PROD)%%
# Create the count encoder
count_enc = ce.CountEncoder(cols=cat_features)

# Learn encoding from the training set
count_enc.fit(train[cat_features])

# Apply encoding to the train and validation sets
train_encoded = train.join(count_enc.transform(train[cat_features]).add_suffix('_count'))
valid_encoded = valid.join(count_enc.transform(valid[cat_features]).add_suffix('_count'))

q_2.assert_check_passed()
# -

# Run the next code cell to see how count encoding changes the results.

# Train the model on the encoded datasets
# This can take around 30 seconds to complete
_ = train_model(train_encoded, valid_encoded)

# Count encoding improved our model's score!

# ### 3) Why is count encoding effective?
# At first glance, it could be surprising that count encoding helps make accurate models.
# Why do you think is count encoding is a good idea, or how does it improve the model score?
#
# Run the following line after you've decided your answer.

# Check your answer (Run this code cell to receive credit!)
q_3.solution()

# ### 4) Target encoding
#
# Here you'll try some supervised encodings that use the labels (the targets) to transform categorical features. The first one is target encoding.
# - Create the target encoder from the `category_encoders` library.
# - Then, learn the encodings from the training dataset, apply the encodings to all the datasets, and retrain the model.

# +
# Create the target encoder. You can find this easily by using tab completion.
# Start typing ce. the press Tab to bring up a list of classes and functions.
target_enc = ____

# Learn encoding from the training set. Use the 'is_attributed' column as the target.
____

# Apply encoding to the train and validation sets as new columns
# Make sure to add `_target` as a suffix to the new columns
train_encoded = ____
valid_encoded = ____

# Check your answer
q_4.check()

# +
# Uncomment these if you need some guidance
#q_4.hint()
#q_4.solution()

# +
# #%%RM_IF(PROD)%%
target_enc = ce.TargetEncoder(cols=cat_features)
target_enc.fit(train[cat_features], train['is_attributed'])
train_encoded = train.join(target_enc.transform(train[cat_features]).add_suffix('_target'))
valid_encoded = valid.join(target_enc.transform(valid[cat_features]).add_suffix('_target'))

q_4.assert_check_passed()
# -

# Run the next cell to see how target encoding affects your results.
_ = train_model(train_encoded, valid_encoded)

# ### 5) Try removing IP encoding
#
# If you leave `ip` out of the encoded features and retrain the model with target encoding, you should find that the score increases and is above the baseline score!  Why do you think the score is below baseline when we encode the IP address but above baseline when we don't?
#
# Run the following line after you've decided your answer.

# Check your answer (Run this code cell to receive credit!)
q_5.solution()

# ### 6) CatBoost Encoding
#
# The CatBoost encoder is supposed to work well with the LightGBM model.  Encode the categorical features with `CatBoostEncoder` and train the model on the encoded data again.

# +
# Remove IP from the encoded features
cat_features = ['app', 'device', 'os', 'channel']

# Create the CatBoost encoder
cb_enc = ce.CatBoostEncoder(cols=cat_features, random_state=7)

# Learn encoding from the training set
____

# Apply encoding to the train and validation sets as new columns
# Make sure to add `_cb` as a suffix to the new columns
train_encoded = ____
valid_encoded = ____

# Check your answer
q_6.check()

# +
# Uncomment these if you need some guidance
#q_6.hint()
#q_6.solution()

# +
# #%%RM_IF(PROD)%%
cat_features = ['app', 'device', 'os', 'channel']
cb_enc = ce.CatBoostEncoder(cols=cat_features, random_state=7)

# Learn encodings on the train set
cb_enc.fit(train[cat_features], train['is_attributed'])

# Apply encodings to each set
train_encoded = train.join(cb_enc.transform(train[cat_features]).add_suffix('_cb'))
valid_encoded = valid.join(cb_enc.transform(valid[cat_features]).add_suffix('_cb'))

q_6.assert_check_passed()
# -

# Run the next code cell to see how the CatBoost encoder changes your results.
_ = train_model(train_encoded, valid_encoded)

# # Keep Going
#
# Now you are ready to **[generate completely new features](#$NEXT_NOTEBOOK_URL$)** from the data.
notebooks/feature_engineering/raw/ex2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Wrangling Template

# ## Gather

import pandas as pd
import zipfile

# Extract all contents from zip file
with zipfile.ZipFile('armenian-online-job-postings.zip', 'r') as myzip:
    myzip.extractall()

# Read CSV (comma-separated) file into DataFrame
df = pd.read_csv('online-job-postings.csv')

# ## Assess

df

df.info()

# - Missing values (NaN)
# - StartDate inconsistencies (ASAP)
# - Fix nondescriptive column headers (ApplicationP, AboutC, RequiredQual ... and also JobRequirment)
#
# ## Clean

# #### Define
# - Select all nondescriptive and misspelled column headers (ApplicationP, AboutC, RequiredQual, JobRequirment) and replace them with full words (ApplicationProcedure, AboutCompany, RequiredQualifications, JobRequirement)
# - Select all records in the StartDate column that have "As soon as possible", "Immediately", etc. and replace the text in those cells with "ASAP"
#
# #### Code

df_clean = df.copy()

# - Select all nondescriptive and misspelled column headers (ApplicationP, AboutC, RequiredQual, JobRequirment) and replace them with full words (ApplicationProcedure, AboutCompany, RequiredQualifications, JobRequirement)
df_clean = df_clean.rename(columns={'ApplicationP': 'ApplicationProcedure',
                                    'AboutC': 'AboutCompany',
                                    'RequiredQual': 'RequiredQualifications',
                                    'JobRequirment': 'JobRequirements'})

# - Select all records in the StartDate column that have "As soon as possible", "Immediately", etc. and replace the text in those cells with "ASAP"

# +
asap_list = ['Immediately', 'As soon as possible', 'Upon hiring',
             'Immediate', 'Immediate employment', 'As soon as possible.',
             'Immediate job opportunity',
             '"Immediate employment, after passing the interview."',
             'ASAP preferred', 'Employment contract signature date',
             'Immediate employment opportunity', 'Immidiately', 'ASA',
             'Asap',
             '"The position is open immediately but has a flexible start date depending on the candidates earliest availability."',
             'Immediately upon agreement', '20 November 2014 or ASAP',
             'immediately', 'Immediatelly',
             '"Immediately upon selection or no later than November 15, 2009."',
             'Immediate job opening', 'Immediate hiring', 'Upon selection',
             'As soon as practical', 'Immadiate', 'As soon as posible',
             'Immediately with 2 months probation period',
             '12 November 2012 or ASAP',
             'Immediate employment after passing the interview',
             'Immediately/ upon agreement', '01 September 2014 or ASAP',
             'Immediately or as per agreement', 'as soon as possible',
             'As soon as Possible', 'in the nearest future', 'immediate',
             '01 April 2014 or ASAP', 'Immidiatly', 'Urgent',
             'Immediate or earliest possible', 'Immediate hire',
             'Earliest possible', 'ASAP with 3 months probation period.',
             'Immediate employment opportunity.', 'Immediate employment.',
             'Immidietly', 'Imminent', 'September 2014 or ASAP',
             'Imediately']

# Replace every ASAP-style value in a single vectorized pass and assign the
# result back explicitly. The previous version looped over asap_list calling
# df_clean.StartDate.replace(entry, 'ASAP', inplace=True) once per entry,
# which rescans the column ~50 times and mutates through attribute access
# with inplace=True — a pattern pandas discourages (and has deprecated for
# Series.replace). Series.replace accepts a list of values directly.
df_clean['StartDate'] = df_clean['StartDate'].replace(asap_list, 'ASAP')
# -

# #### Test

df_clean['StartDate'].value_counts()
data-wrangling-template.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Ethics in Data Science # Considerations: # # **Starting point: you are incredibly fortunate to be a student in MSAN, to be a data scientist in San Francisco. You have a responsibility to not do harm with your skills.** # **Not everything your employer asks you to do may be legal.** An engineer at Volkswagen was [sentenced to 3.5 years in prison](https://www.nytimes.com/2017/08/25/business/volkswagen-engineer-prison-diesel-cheating.html) for helping develop the software to cheat on federal emissions tests. Your boss asking you to do something is not an excuse that will protect you in court. # **Not everything that is legal is ethical.** # **What questions are worth asking?** # **What data should be collected?** (Privacy, de-anonymization, risks of leaks.) - # Unintended consequences: **Impact can be very different from intent.** E.g. impact of online (semi-automated) censorship of violent videos. # **Don't blindly optimize for a metric (or group of metrics) without thinking about the bigger picture.** As [<NAME> points out](https://www.youtube.com/watch?list=PLB2SCq-tZtVmadnKpO8WwKiFKteY5rHPT&time_continue=1624&v=WjKdKvDS10g), including information about whether someone's father left when they were a child could increase accuracy of a model for criminal recidivism, but is it moral to do so? # # <img src="images/ethics_recidivism.jpg" alt="digit" style="width: 40%"/> # (Source: [Ethics for Powerful Algorithms](https://www.youtube.com/watch?list=PLB2SCq-tZtVmadnKpO8WwKiFKteY5rHPT&time_continue=1624&v=WjKdKvDS10g)) # **Costs of Mistakes** # *There’s no hiding behind algorithms anymore.* -Atlantic reporter <NAME>.
After the recent shooting in Vegas, [Google surfaced a 4chan story](https://www.theatlantic.com/technology/archive/2017/10/google-and-facebook-have-failed-us/541794/) as a top search result and Facebook's Trending Stories showed a page from a known source of Russian propaganda. # **Self-reinforcing Feedback loops** # # E.g. if you don't show women tech meetups, then you'll get even fewer women coming to tech meetups, which will cause the algorithm to suggest even fewer tech meetups to women, etc... See Evan Estola: [When Recommendation Systems Go Bad](https://www.youtube.com/watch?v=MqoRzNhrTnQ) # # Sending more cops to predominantly Black neighborhoods will increase reported Black crime, increasing bias against Black people in policing algorithms.
courses/ml1/Ethics in Data Science.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pickle
import random
from tqdm.notebook import tqdm
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from skimage.transform import resize
from skimage.io import imread
from skimage import color


def unpickle(file):
    """Load one CIFAR-10 batch file and return its dict.

    encoding='latin1' is required because the CIFAR-10 pickles were
    written under Python 2.
    """
    # Context manager guarantees the handle is closed even if pickle.load
    # raises; the previous version opened/closed manually and also shadowed
    # the builtin name `dict`.
    with open(file, 'rb') as fo:
        return pickle.load(fo, encoding='latin1')


def grayscale(a):
    """Convert flat RGB rows (N, 3*32*32) to flat grayscale rows (N, 32*32)
    by averaging the three color channels."""
    return a.reshape(a.shape[0], 3, 32, 32).mean(1).reshape(a.shape[0], -1)


def clean(data):
    """Grayscale, center-crop to 24x24, and per-image standardize CIFAR rows.

    Returns an array of shape (N, 576) with zero mean and ~unit variance per
    image. The std is floored at 1/sqrt(img_size) so near-uniform images do
    not blow up under division.
    """
    imgs = data.reshape(data.shape[0], 3, 32, 32)
    grayscale_imgs = imgs.mean(1)
    # Crop the central 24x24 window out of each 32x32 image.
    cropped_imgs = grayscale_imgs[:, 4:28, 4:28]
    img_data = cropped_imgs.reshape(data.shape[0], -1)
    img_size = np.shape(img_data)[1]
    means = np.mean(img_data, axis=1)
    meansT = means.reshape(len(means), 1)
    stds = np.std(img_data, axis=1)
    stdsT = stds.reshape(len(stds), 1)
    adj_stds = np.maximum(stdsT, 1.0 / np.sqrt(img_size))
    normalized = (img_data - meansT) / adj_stds
    return normalized


def read_data(directory):
    """Read and preprocess the five CIFAR-10 training batches from `directory`.

    Returns (label names, cleaned float32 data of shape (50000, 576), labels).
    """
    names = unpickle('{}/batches.meta'.format(directory))['label_names']
    print('names', names)
    data, labels = [], []
    for i in range(1, 6):
        filename = '{}/data_batch_{}'.format(directory, i)
        batch_data = unpickle(filename)
        if len(data) > 0:
            data = np.vstack((data, batch_data['data']))
            labels = np.hstack((labels, batch_data['labels']))
        else:
            data = batch_data['data']
            labels = batch_data['labels']
    print(np.shape(data), np.shape(labels))
    data = clean(data)
    data = data.astype(np.float32)
    return names, data, labels


names, data, labels = read_data('../data/cifar-10-batches-py')

x = tf.placeholder(tf.float32, [None, 24 * 24])  # 5000,576
y = tf.placeholder(tf.float32, [None, len(names)])  # 50000, 10

W1 = tf.Variable(tf.random_normal([5, 5, 1, 64])) # first convo
layer with 64 filters size 5x5, one band greyscale, image input size 24x24 b1 = tf.Variable(tf.random_normal([64])) # biases for first convo layer W2 = tf.Variable(tf.random_normal([5, 5, 64, 64])) # second convo layer with 64 filters size 5x5, run on the 64 outputs of first layer, image input size 12x12 b2 = tf.Variable(tf.random_normal([64])) # biases for the second convo layer W3 = tf.Variable(tf.random_normal([6*6*64, 1024])) # fully connected layer taking output of convo layer which are 64 image inputs size 6x6 and mapping to 32x32 original image b3 = tf.Variable(tf.random_normal([1024])) # biases for fully connected layer W_out = tf.Variable(tf.random_normal([1024, len(names)])) # output weights for final layer of actual labels, 1024 features mapped to 10 labels one hot encoded - used for hlf correlations b_out = tf.Variable(tf.random_normal([len(names)])) # biases for output labels def conv_layer(x, W, b): conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') conv_with_b = tf.nn.bias_add(conv, b) conv_out = tf.nn.relu(conv_with_b) return conv_out def maxpool_layer(conv, k=2): return tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME') def model(): x_reshaped = tf.reshape(x, shape=[-1, 24, 24, 1]) conv_out1 = conv_layer(x_reshaped, W1, b1) maxpool_out1 = maxpool_layer(conv_out1) norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) conv_out2 = conv_layer(norm1, W2, b2) norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) maxpool_out2 = maxpool_layer(norm2) maxpool_reshaped = tf.reshape(maxpool_out2, [-1, W3.get_shape().as_list()[0]]) local = tf.add(tf.matmul(maxpool_reshaped, W3), b3) local_out = tf.nn.relu(local) out = tf.add(tf.matmul(local_out, W_out), b_out) return out model_op = model() cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y)) train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost) correct_pred = 
tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) epochs=2000 config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() onehot_labels = tf.one_hot(labels, len(names), on_value=1., off_value=0., axis=-1) onehot_vals = sess.run(onehot_labels) batch_size = len(data) // 200 print('batch size', batch_size) for j in tqdm(range(0, 1000)): #print('EPOCH', j) for i in range(0, len(data), batch_size): batch_data = data[i:i+batch_size, :] batch_onehot_vals = onehot_vals[i:i+batch_size, :] _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x:batch_data, y: batch_onehot_vals}) #if i % 1000 == 0: # print(i, accuracy_val) #print('DONE WITH EPOCH') print(j, accuracy_val) saver.save(sess, '../models/cifar10-cnn-tf1n-'+str(epochs)+'epochs.ckpt') def model_X(x, W1, b1, W2, b2, W3, b3, W_out, b_out): x_reshaped = tf.reshape(x, shape=[-1, 24, 24, 1]) conv_out1 = conv_layer(x_reshaped, W1, b1) maxpool_out1 = maxpool_layer(conv_out1) norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) conv_out2 = conv_layer(norm1, W2, b2) norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) maxpool_out2 = maxpool_layer(norm2) maxpool_reshaped = tf.reshape(maxpool_out2, [-1, W3.shape[0]]) local = tf.add(tf.matmul(maxpool_reshaped, W3), b3) local_out = tf.nn.relu(local) out = tf.add(tf.matmul(local_out, W_out), b_out) return out def predict(img_data): with tf.Session() as sess: saver = tf.train.Saver() saver.restore(sess, "../models/cifar10-cnn-tf1n-"+str(epochs)+"epochs.ckpt") print("Model restored.") W1_val = W1.eval() b1_val = b1.eval() W2_val = W2.eval() b2_val = b2.eval() W3_val = W3.eval() b3_val = b3.eval() W_out_val = W_out.eval() b_out_val = b_out.eval() model_x_out = model_X(img_data, W1_val, b1_val, W2_val, b2_val, W3_val, b3_val, W_out_val, b_out_val) class_num = 
np.argmax(model_x_out.eval(), axis=1)[0] class_name = names[np.argmax(model_x_out.eval(), axis=1)[0]] return (class_num, class_name) class_num, class_name = predict(data[3]) print('Class Num', class_num) print('Class', class_name) plt.figure() plt.title(class_name) img = np.reshape(data[3, :], (24,24)) plt.imshow(img, cmap='Greys_r') plt.axis('off') plt.tight_layout() # test out on test images def read_test_data(directory): names = unpickle('{}/batches.meta'.format(directory))['label_names'] print('names', names) data, labels = [], [] filename = '{}/test_batch'.format(directory) batch_data = unpickle(filename) if len(data) > 0: data = np.vstack((data, batch_data['data'])) labels = np.hstack((labels, batch_data['labels'])) else: data = batch_data['data'] labels = batch_data['labels'] print(np.shape(data), np.shape(labels)) data = clean(data) data = data.astype(np.float32) return names, data, labels test_names, test_data, test_labels = read_test_data('../data/cifar-10-batches-py') test_class_num, test_class_name = predict(test_data[4]) print('Test Class Num', test_class_num) print('Test Class Name', test_class_name) print('Actual Class Label', test_labels[4]) plt.figure() plt.title(test_class_name) img = np.reshape(test_data[3, :], (24,24)) plt.imshow(img, cmap='Greys_r') plt.axis('off') plt.tight_layout() # + def get_test_accuracy(test_data, test_names, test_labels): with tf.Session() as sess: saver = tf.train.Saver() saver.restore(sess, "../models/cifar10-cnn-tf1n-"+str(epochs)+"epochs.ckpt") print("Model restored.") W1_val = W1.eval() b1_val = b1.eval() W2_val = W2.eval() b2_val = b2.eval() W3_val = W3.eval() b3_val = b3.eval() W_out_val = W_out.eval() b_out_val = b_out.eval() model_x_out = model_X(test_data, W1_val, b1_val, W2_val, b2_val, W3_val, b3_val, W_out_val, b_out_val) onehot_test_labels = tf.one_hot(test_labels, len(test_names), on_value=1., off_value=0., axis=-1) test_correct_pred = tf.equal(tf.argmax(model_x_out, 1), tf.argmax(onehot_test_labels, 1)) 
test_accuracy = tf.reduce_mean(tf.cast(test_correct_pred, tf.float32)) print('Test accuracy %f' % (test_accuracy.eval())) predictions = tf.argmax(model_x_out, 1).eval() return (predictions, tf.cast(test_correct_pred, tf.float32).eval(), onehot_test_labels.eval()) predict_vals, test_correct_preds, onehot_test_lbls = get_test_accuracy(test_data, test_names, test_labels) print(predict_vals) print(predict_vals.shape) print(test_correct_preds) print(test_correct_preds.shape) print(onehot_test_lbls.shape) # + outcome_test = label_binarize(test_labels, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) predictions_test = label_binarize(predict_vals, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) n_classes = outcome_test.shape[1] # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(outcome_test[:, i], predictions_test[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(outcome_test.ravel(), predictions_test.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) # Plot of a ROC curve for a specific class plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic for class: '+test_names[2]) plt.legend(loc="lower right") plt.show() # Plot ROC curve plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"])) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(test_names[i], roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') roc_mean = np.mean(np.fromiter(roc_auc.values(), 
dtype=float)) plt.title('ROC curve for CIFAR-10 CNN 2000 iter Tensorflow (area = %{0:0.2f})'.format(roc_mean)) plt.legend(loc="lower right") plt.show() # - def predict_img_url(url): image = color.rgb2gray(imread(url)) print(image.shape) plt.figure() plt.title('URL from Internet') plt.imshow(image, cmap='Greys_r') plt.axis('off') plt.tight_layout() new_size = 24,24 image = resize(image, new_size, anti_aliasing=True) print(image.shape) images = np.expand_dims(image, axis=0) print(images.shape) im_data = images.astype(np.float32) print(im_data.shape) prediction = predict(im_data[0]) print(prediction) print("Cropped to 24x24") plt.figure() plt.title('Pic from Internet (24x24): '+str(prediction[1])) # prediction format is (class num,label) plt.imshow(images[0], cmap='Greys_r') plt.axis('off') plt.tight_layout() return prediction # + predict_urls = [ 'http://www.torontozoo.com/adoptapond/guide_images/Green%20Frog.jpg', #frog 'https://cdn.cnn.com/cnnnext/dam/assets/160205192735-01-best-cruise-ships-disney-dream-super-169.jpg', #ship 'https://www.sailboston.com/wp-content/uploads/2016/11/amerigo-vespucci.jpg', #ship 'https://upload.wikimedia.org/wikipedia/commons/d/d9/Motorboat_at_Kankaria_lake.JPG', #ship 'https://media.wired.com/photos/5b9c3d5e7d9d332cf364ad66/master/pass/AV-Trucks-187479297.jpg', #truck 'https://images.schoolspecialty.com/images/1581176_ecommfullsize.jpg', #truck 'https://img.purch.com/w/660/aHR0cDovL3d3dy5saXZlc2NpZW5jZS5jb20vaW1hZ2VzL2kvMDAwLzEwNC84MTkvb3JpZ2luYWwvY3V0ZS1raXR0ZW4uanBn', # cat 'https://thehorse.com/wp-content/uploads/2017/01/iStock-510488648.jpg' #horse ] predicted_labels = [] for url in predict_urls: pred = predict_img_url(url) predicted_labels.append(pred) print(predicted_labels) # -
ch14/Listing 14.11 - 14.16.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import pandas as pd import numpy as np # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data = pd.read_csv(file_to_load) purchase_data.head() # - # ## Player Count # * Display the total number of players # player_data = purchase_data.loc[:, ['SN', 'Age', 'Gender']] player_data = player_data.drop_duplicates() player_count = player_data.count()[0] pd.DataFrame({'Total Players': [player_count]}) # ## Purchasing Analysis (Total) # * Run basic calculations to obtain number of unique items, average price, etc. 
# # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # # + unique_count = purchase_data['Item ID'].drop_duplicates().count() price = purchase_data['Price'].mean() avg_price = '$' + str(np.round(price, decimals=2)) total_purchase = purchase_data['Price'].count() revenue = '$' + str(purchase_data['Price'].sum()) summary = pd.DataFrame({ 'Number of Unique Items' : [unique_count], 'Average Purchase Price' : [avg_price], 'Total Number of Purchases' : [total_purchase], 'Total Revenue' : [revenue] }) summary # - # ## Gender Demographics # * Percentage and Count of Male Players # # # * Percentage and Count of Female Players # # # * Percentage and Count of Other / Non-Disclosed # # # # + gender_demo_count = player_data['Gender'].value_counts() gender_demo_percent = gender_demo_count/player_count gender_demographics = pd.DataFrame({'Player Count': gender_demo_count, 'Player Percentage': gender_demo_percent}) gender_demographics['Player Percentage'] = gender_demographics['Player Percentage'].map('{:,.2%}'.format) gender_demographics # - # # ## Purchasing Analysis (Gender) # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
by gender # # # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + purchase_count = purchase_data.groupby(['Gender']).sum()['Price'].rename('Total Purchase Value') avg_purchase_price = purchase_data.groupby(['Gender']).mean()['Price'].rename('Average Purchase Price') total_purchase_value = purchase_data.groupby(['Gender']).count()['Price'].rename('Purchase Count') avg_purchase_per_gender = purchase_count / gender_demographics['Player Count'] gender_purchase_analysis = pd.DataFrame({"Purchase Count": total_purchase_value, "Average Purchase Price": avg_purchase_price, "Total Purchase Value": purchase_count, "Player Count": avg_purchase_per_gender}) gender_purchase_analysis["Average Purchase Price"] = gender_purchase_analysis["Average Purchase Price"].map("${:,.2f}".format) gender_purchase_analysis["Total Purchase Value"] = gender_purchase_analysis["Total Purchase Value"].map("${:,.2f}".format) gender_purchase_analysis["Purchase Count"] = gender_purchase_analysis["Purchase Count"].map("{:,}".format) gender_purchase_analysis["Avg Total Purchase per Person"] = gender_purchase_analysis["Player Count"].map("${:,.2f}".format) gender_purchase_analysis = gender_purchase_analysis.loc[:, ["Purchase Count", "Average Purchase Price", "Total Purchase Value", "Avg Total Purchase per Person"]] gender_purchase_analysis # - # ## Age Demographics # * Establish bins for ages # # # * Categorize the existing players using the age bins. 
Hint: use pd.cut() # # # * Calculate the numbers and percentages by age group # # # * Create a summary data frame to hold the results # # # * Optional: round the percentage column to two decimal points # # # * Display Age Demographics Table # # + # Creating Bins age_bins = [0, 9.99, 14.99, 19.99, 24.99, 29.99, 34.99, 39.99, 99999] age_bin_names = ['<10', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40+'] # Categorizing Players by Age Group player_data['Age Ranges'] = pd.cut(player_data['Age'], age_bins, labels=age_bin_names) # Numbers and Percentages by Age Group age_demo_count = player_data['Age Ranges'].value_counts() age_demo_percent = age_demo_count / player_count age_demographics = pd.DataFrame({'Total Count': age_demo_count, 'Percentage of Players': age_demo_percent}) age_demographics['Percentage of Players'] = age_demographics['Percentage of Players'].map('{:,.2%}'.format) age_demographics = age_demographics.sort_index() age_demographics # - # ## Purchasing Analysis (Age) # * Bin the purchase_data data frame by age # # # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. 
in the table below # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # + # Catigorizing Purchases by Age purchase_data['Age Range'] = pd.cut(purchase_data['Age'], age_bins, labels=age_bin_names) # Calculations age_purchase_count = purchase_data.groupby(['Age Range']).count()['Price'].rename('Purchase Count') age_purchase_value = purchase_data.groupby(['Age Range']).sum()['Price'].rename('Total Purchase Value') age_avg_purchase = purchase_data.groupby(['Age Range']).mean()['Price'].rename('Average Purchase Price') avg_purchase_per_age = age_purchase_value / age_demographics['Total Count'] age_purchase_analysis = pd.DataFrame({'Purchase Count': age_purchase_count, 'Average Purchase Price': age_avg_purchase, 'Total Purchase Value': age_purchase_value, 'Avg Total Purchase Per Person': avg_purchase_per_age}) # Formatting age_purchase_analysis['Purchase Count'] = age_purchase_analysis['Purchase Count'].map('{:,}'.format) age_purchase_analysis['Average Purchase Price'] = age_purchase_analysis['Average Purchase Price'].map('${:,.2f}'.format) age_purchase_analysis['Total Purchase Value'] = age_purchase_analysis['Total Purchase Value'].map('${:,.2f}'.format) age_purchase_analysis['Avg Total Purchase Per Person'] = age_purchase_analysis['Avg Total Purchase Per Person'].map('${:,.2f}'.format) age_purchase_analysis = age_purchase_analysis.loc[:, ['Purchase Count', 'Average Purchase Price', 'Total Purchase Value', 'Avg Total Purchase Per Person']] age_purchase_analysis # - # ## Top Spenders # * Run basic calculations to obtain the results in the table below # # # * Create a summary data frame to hold the results # # # * Sort the total purchase value column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # + # Caluclations gamer_count = 
purchase_data.groupby(['SN']).count()['Price'].rename('Purchase Count')
gamer_avg = purchase_data.groupby(['SN']).mean()['Price'].rename('Average Purchase Price')
gamer_total = purchase_data.groupby(['SN']).sum()['Price'].rename('Total Purchase Value')

# Displaying and Sorting Data
# Column key fixed from the misspelled 'Purhase Count' so the rendered header
# is correct and matches both the rename('Purchase Count') above and the
# column list used in the (commented-out) .loc selection below.
gamer_data = pd.DataFrame({'Purchase Count': gamer_count,
                           'Average Purchase Price': gamer_avg,
                           'Total Purchase Value': gamer_total})
# Sort while the totals are still numeric, before string formatting.
gamer_data_sorted = gamer_data.sort_values('Total Purchase Value', ascending=False)

# Formatting
gamer_data_sorted['Average Purchase Price'] = gamer_data_sorted['Average Purchase Price'].map('${:,.2f}'.format)
gamer_data_sorted['Total Purchase Value'] = gamer_data_sorted['Total Purchase Value'].map('${:,.2f}'.format)
#gamer_data_sorted = gamer_data_sorted.loc[:,['Purchase Count', 'Average Purchase Price', 'Total Purchase Value']]
gamer_data_sorted.head(5)
# -

# ## Most Popular Items

# * Retrieve the Item ID, Item Name, and Item Price columns
#
#
# * Group by Item ID and Item Name.
Perform calculations to obtain purchase count, average item price, and total purchase value # # # * Create a summary data frame to hold the results # # # * Sort the purchase count column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # + # Pulling item data from purchase data item_data = purchase_data.loc[:, ['Item ID', 'Item Name', 'Price']] # Calculations item_count = item_data.groupby(['Item ID', 'Item Name']).count()['Price'].rename('Purchase Count') item_avg_price = item_data.groupby(['Item ID', 'Item Name']).mean()['Price'].rename('Item Price') total_item_purchase = item_data.groupby(['Item ID', 'Item Name']).sum()['Price'].rename('Total Price Value') # Creat Dataframe / Sort Data item_data_pd = pd.DataFrame({'Purchase Count': item_count, 'Item Price': item_avg_price, 'Total Purchase Value': total_item_purchase}) item_sorted = item_data_pd.sort_values('Purchase Count', ascending=False) # Formatting item_sorted['Purchase Count'] = item_sorted['Purchase Count'].map('{:,}'.format) item_sorted['Item Price'] = item_sorted['Item Price'].map('${:,.2f}'.format) item_sorted['Total Purchase Value'] = item_sorted['Total Purchase Value'].map('${:,.2f}'.format) popular_items = item_sorted.loc[:,['Purchase Count','Item Price','Total Purchase Value']] popular_items.head(5) # - # ## Most Profitable Items # * Sort the above table by total purchase value in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the data frame # # # + profit_items = item_data_pd.sort_values('Total Purchase Value', ascending=False) profit_items['Purchase Count'] = profit_items['Purchase Count'].map('{:,}'.format) profit_items['Item Price'] = profit_items['Item Price'].map('${:,.2f}'.format) profit_items['Total Purchase Value'] = profit_items['Total Purchase Value'].map('${:,.2f}'.format) profitable_items = profit_items.loc[:,['Purchase Count','Item 
Price','Total Purchase Value']] profitable_items.head(5) # -
HeroesOfPymoli_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SQL for Analyst 4 - Join # + # set MySQL URL user = "dz3vg" password = "" host = "localhost" port = 3306 dbname = "dataapplab_db" with open("MySQL.key", "r") as file: password = file.read() connection_string = f"mysql+mysqlconnector://{user}:{password}@{host}:{port}/{dbname}" # connect to MySQL server # %load_ext sql # %sql $connection_string # clean password password = "" connection_string = "" # - # %sql use hr; # + language="sql" # # select # * # from # employees, departments # limit 10 # ; # + language="sql" # # select # count(distinct employee_id) # from # employees # ; # + language="sql" # # select # count(distinct department_id) # from # departments # ; # + language="sql" # # select # count(*) # from # employees, departments # ; # + language="sql" # # select # * # from # employees, departments # where # employees.department_id = departments.department_id # limit 10 # ; # + language="sql" # # select # * # from # employees # where # employee_id = 100 # ; # + language="sql" # # select # * # from # departments # where # department_id = 90 # ; # + language="sql" # # select # concat(e.first_name, ' ', e.last_name) as full_name, # e.department_id, # d.department_name # from # departments as d # join # employees as e on d.department_id = e.department_id # order by d.department_id # limit 10 # ; # + language="sql" # # select # concat(e.first_name, ' ', e.last_name) as full_name, # e.department_id, # d.department_name # from # departments as d # join # employees as e on d.department_id = e.department_id # order by full_name # limit 10 # ; # + language="sql" # # select # concat(e.first_name, ' ', e.last_name) as full_name, # e.department_id, # d.department_name # from # departments as d # join # employees as e on d.department_id = e.department_id # 
order by full_name desc # limit 10 # ; # + language="sql" # # select # concat(e.first_name, ' ', e.last_name) as full_name, # e.department_id, # d.department_name # from # departments as d # left join # employees as e on d.department_id = e.department_id # limit 100, 10 # ; # + language="sql" # # select # concat(e.first_name, ' ', e.last_name) as full_name, # e.department_id as department_id_e, # d.department_id as department_id_d, # d.department_name # from # departments as d # left join # employees as e on d.department_id = e.department_id # limit 100, 10 # ; # + language="sql" # # select # e.first_name, # e.last_name, # d.department_id, # l.city, # l.state_province # from # employees as e # join # departments as d on e.department_id = d.department_id # join # locations as l on d.location_id = l.location_id # limit 10 # ; # + language="sql" # # select # first_name, # last_name, # salary, # (case # when salary between 1000 and 2999 then '#A' # when salary between 3000 and 5999 then '#B' # when salary between 6000 and 9999 then '#C' # when salary between 10000 and 14999 then '#D' # when salary between 15000 and 24999 then '#E' # when salary between 25000 and 40000 then '#F' # else 'G+' # end) as job_grades # from # employees # limit 10 # ; # + language="sql" # # select # e.first_name, # e.last_name, # d.department_id, # d.department_name # from # employees as e # left join # departments as d on e.department_id = d.department_id # where # e.department_id in (40, 80) # order by e.last_name # limit 10 # ; # + language="sql" # # select # e.first_name, # e.last_name, # d.department_id, # d.department_name # from # employees as e # left join # departments as d on e.department_id = d.department_id and e.department_id in (40, 80) # order by e.last_name # limit 10 # ; # + language="sql" # # select # e.first_name, # e.last_name, # d.department_id, # d.department_name # from # employees as e # join # departments as d on e.department_id = d.department_id and e.department_id in 
(40, 80) # order by e.last_name # limit 10 # ; # + language="sql" # # select # d.department_id, # d.department_name, # e.first_name, # e.last_name # from # employees as e # right join # departments as d on e.department_id = d.department_id # limit 10 # ; # + language="sql" # # select # d.department_id, # d.department_name, # e.first_name, # e.last_name # from # employees as e # right join # departments as d on e.department_id = d.department_id # where # e.first_name is null # limit 10 # ; # + language="sql" # # select # e.first_name, # e.last_name, # e.salary # from # employees as e # join # employees as s on e.salary < s.salary and s.employee_id = 182 # limit 10 # ; # + language="sql" # # select # first_name, # last_name, # salary # from # employees # where # salary < (select # salary # from # employees # where # employee_id = 182) # limit 10 # ; # + language="sql" # # select # country_name, # city, # department_name # from # countries # join # locations using (country_id) # join # departments using (location_id) # limit 10 # ; # - # Note: Command Execution Order # # 1. on # 2. join # 3. where # 4. select ... as alias # 5. order by
Data Scientist Bootcamp/MySQL for Analyst/SQL-for-Analyst-4-Join.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.9 64-bit (''PythonData'': conda)' # name: python379jvsc74a57bd0185682abfd9f665f5ace63c8dbf57ce50d75e1e561efde62f688fc77323a725a # --- # Study partner is <NAME>. Worked on Pseudo code together and hiding api keys. # Received assistance from instructor via office hours and tutoring sessions. # The starter code and pseudo code for this challenge was provided and can be reviewed in data folder # Resources: past class assignments and Google searches # # VacationPy # ---- # # #### Note # * Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing. # # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # !pip install gmaps # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import gmaps import os # Import API key from api_keys import g_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy if g_key == "YOUR KEY HERE!": import sys sys.path.append(r"C:\Users\Brad\Documents\GitHub\Untracked_Files") from api_keys_ATN import g_key # print(g_key) # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # - # ### Store Part I results into DataFrame # * Load the csv exported in Part I to a DataFrame cities_loop_pd=pd.read_csv('cities_loop.csv') cities_loop_pd # ### Humidity Heatmap # * Configure gmaps. # * Use the Lat and Lng as locations and Humidity as the weight. # * Add Heatmap layer to map. 
gmaps.configure(api_key=g_key) # + # Store 'Lat' and 'Lng' into locations locations = cities_loop_pd[["latitude", "longitude"]].astype(float) humidity_rate = cities_loop_pd["humidity"].astype(float) fig = gmaps.figure() heat_layer = gmaps.heatmap_layer(locations, weights=humidity_rate, dissipating=False, max_intensity=100, point_radius = 1) fig.add_layer(heat_layer) fig # - # ### Create new DataFrame fitting weather criteria # * Narrow down the cities to fit weather conditions. # * Drop any rows will null values. # + # Narrow down the DataFrame to find your ideal weather condition. # * A max temperature lower than 80 degrees but higher than 30. # * Wind speed less than 30 mph. # * Cloudiness 10 Ideal_weather=cities_loop_pd.loc[(cities_loop_pd['temperature'] < 80) & (cities_loop_pd['temperature'] > 30) & (cities_loop_pd['wind speed'] < 30) & (cities_loop_pd['clouds'] <10)] Ideal_weather # - # ### Hotel Map # * Store into variable named `hotel_df`. # * Add a "Hotel Name" column to the DataFrame. # * Set parameters to search for hotels with 5000 meters. # * Hit the Google Places API for each city's coordinates. # * Store the first Hotel result into the DataFrame. # * Plot markers on top of the heatmap. # + # new variable name hotel_df=Ideal_weather.copy() # Create a new column hotel_df['Hotel Name'] = "" hotel_df # + tags=[] # setting paramaters to search # <NAME>, cohort from class, suggested I use hotel for target type and keyword instead of lodging. 
# Google Places "nearby search" parameters.
target_radius = 5000      # search radius in meters
target_type = "Hotel"
keyword = "hotel"
# NOTE(review): the original also assigned key="g_key" (the literal string,
# not the API key). It was never used — params below reads the real g_key —
# so the dead, misleading binding is removed.

# set up a parameters dictionary
params = {
    "radius": target_radius,
    "type": target_type,
    "key": g_key,
    "keyword": keyword
}

# base url is loop-invariant, so build it once instead of per row
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"

# query the nearest hotel for every city that met the weather criteria
for index, row in hotel_df.iterrows():
    lat = row['latitude']
    lon = row['longitude']
    params['location'] = f"{lat},{lon}"

    # run a request using our params dictionary
    response = requests.get(base_url, params=params)
    places_data = response.json()

    # Only catch the errors an empty/missing result raises; a bare
    # `except:` would also silently swallow network and JSON failures.
    try:
        hotel_df.loc[index, "Hotel Name"] = places_data['results'][0]['name']
        print(places_data['results'][0]['name'])
    except (IndexError, KeyError):
        print("Hotel not Found")
        hotel_df.loc[index, "Hotel Name"] = "Hotel not Found"
# -

hotel_df

# +
# NOTE: Do not change any of the code in this cell

# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{city}</dd>
<dt>Country</dt><dd>{country}</dd>
</dl>
"""
#***I had to use lowercase city and country in order for the code to work***
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["latitude", "longitude"]]

# +
# Add marker layer ontop of heat map
marker_layer = gmaps.marker_layer(
    locations, info_box_content=hotel_info)
fig = gmaps.figure()
fig.add_layer(marker_layer)

fig
# Display figure
# -
Vacation_py/VacationPy_ATN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.4 64-bit (windows store)
#     language: python
#     name: python3
# ---

# +
#quote https://www.programiz.com/dsa/b-plus-tree
# -

"""
B+ 树的属性
所有叶子都处于同一水平。
根至少有两个孩子。
除 root 外的每个节点最多可以有m个孩子,至少m/2 个孩子。
每个节点最多可以包含m - 1 键和至少⌈m/2⌉ - 1 键。
"""

# B+ tree in python

import math


# Node creation
class Node:
    """One B+ tree node.

    For internal nodes, ``keys`` holds child Node references and ``values``
    holds the separator values. For leaves, ``keys`` holds lists of record
    keys and ``values`` the (string) search values; ``nextKey`` links leaves.
    """

    def __init__(self, order):
        self.order = order        # maximum number of children / fan-out
        self.values = []          # separator values (internal) or data values (leaf)
        self.keys = []            # child nodes (internal) or record-key lists (leaf)
        self.nextKey = None       # next leaf in the linked leaf chain
        self.parent = None
        self.check_leaf = False   # True for leaf nodes

    # Insert at the leaf.
    # NOTE(review): `leaf` is accepted but unused; kept for interface
    # compatibility with existing callers. Values are compared as strings.
    def insert_at_leaf(self, leaf, value, key):
        if self.values:
            temp1 = self.values
            for i in range(len(temp1)):
                if value == temp1[i]:
                    # duplicate value: append the record key to its bucket
                    self.keys[i].append(key)
                    break
                elif value < temp1[i]:
                    # insert before the first larger value
                    self.values = self.values[:i] + [value] + self.values[i:]
                    self.keys = self.keys[:i] + [[key]] + self.keys[i:]
                    break
                elif i + 1 == len(temp1):
                    # larger than everything: append at the end
                    self.values.append(value)
                    self.keys.append([key])
                    break
        else:
            self.values = [value]
            self.keys = [[key]]


# +
# B plus tree
class BplusTree:
    """B+ tree keyed by string values, each holding a list of record keys."""

    def __init__(self, order):
        self.root = Node(order)
        self.root.check_leaf = True

    # Insert operation: place (value, key) in its leaf, splitting on overflow.
    def insert(self, value, key):
        value = str(value)  # all values are compared as strings
        old_node = self.search(value)
        old_node.insert_at_leaf(old_node, value, key)

        if len(old_node.values) == old_node.order:
            # leaf overflow: split into two leaves and push the split value up
            node1 = Node(old_node.order)
            node1.check_leaf = True
            node1.parent = old_node.parent
            mid = int(math.ceil(old_node.order / 2)) - 1
            node1.values = old_node.values[mid + 1:]
            node1.keys = old_node.keys[mid + 1:]
            node1.nextKey = old_node.nextKey
            old_node.values = old_node.values[:mid + 1]
            old_node.keys = old_node.keys[:mid + 1]
            old_node.nextKey = node1
            self.insert_in_parent(old_node, node1.values[0], node1)

    # Search operation: descend to the leaf that should contain `value`.
    def search(self, value):
        current_node = self.root
        while current_node.check_leaf == False:
            temp2 = current_node.values
            for i in range(len(temp2)):
                if value == temp2[i]:
                    current_node = current_node.keys[i + 1]
                    break
                elif value < temp2[i]:
                    current_node = current_node.keys[i]
                    break
                elif i + 1 == len(current_node.values):
                    current_node = current_node.keys[i + 1]
                    break
        return current_node

    # Find the node: True iff (value, key) is stored in the tree.
    def find(self, value, key):
        leaf = self.search(value)
        for i, item in enumerate(leaf.values):
            if item == value:
                if key in leaf.keys[i]:
                    return True
                else:
                    return False
        return False

    # Inserting at the parent after a child split.
    def insert_in_parent(self, n, value, ndash):
        if self.root == n:
            # splitting the root: grow the tree by one level
            rootNode = Node(n.order)
            rootNode.values = [value]
            rootNode.keys = [n, ndash]
            self.root = rootNode
            n.parent = rootNode
            ndash.parent = rootNode
            return

        parentNode = n.parent
        temp3 = parentNode.keys
        for i in range(len(temp3)):
            if temp3[i] == n:
                parentNode.values = parentNode.values[:i] + \
                    [value] + parentNode.values[i:]
                parentNode.keys = parentNode.keys[:i + 1] + [ndash] + parentNode.keys[i + 1:]
                if len(parentNode.keys) > parentNode.order:
                    # internal overflow: split the parent and recurse upward
                    parentdash = Node(parentNode.order)
                    parentdash.parent = parentNode.parent
                    mid = int(math.ceil(parentNode.order / 2)) - 1
                    parentdash.values = parentNode.values[mid + 1:]
                    parentdash.keys = parentNode.keys[mid + 1:]
                    value_ = parentNode.values[mid]
                    if mid == 0:
                        parentNode.values = parentNode.values[:mid + 1]
                    else:
                        parentNode.values = parentNode.values[:mid]
                    parentNode.keys = parentNode.keys[:mid + 1]
                    for j in parentNode.keys:
                        j.parent = parentNode
                    for j in parentdash.keys:
                        j.parent = parentdash
                    self.insert_in_parent(parentNode, value_, parentdash)

    # Delete a node (one (value, key) record).
    def delete(self, value, key):
        node_ = self.search(value)

        temp = 0
        for i, item in enumerate(node_.values):
            if item == value:
                temp = 1
                if key in node_.keys[i]:
                    if len(node_.keys[i]) > 1:
                        # several record keys share this value: drop just one
                        node_.keys[i].pop(node_.keys[i].index(key))
                    elif node_ == self.root:
                        node_.values.pop(i)
                        node_.keys.pop(i)
                    else:
                        node_.keys[i].pop(node_.keys[i].index(key))
                        del node_.keys[i]
                        node_.values.pop(node_.values.index(value))
                        self.deleteEntry(node_, value, key)
                else:
                    print("Value not in Key")
                    return
        if temp == 0:
            print("Value not in Tree")
            return

    # Delete an entry, rebalancing by merge or redistribution on underflow.
    def deleteEntry(self, node_, value, key):

        if not node_.check_leaf:
            for i, item in enumerate(node_.keys):
                if item == key:
                    node_.keys.pop(i)
                    break
            for i, item in enumerate(node_.values):
                if item == value:
                    node_.values.pop(i)
                    break

        if self.root == node_ and len(node_.keys) == 1:
            # root shrunk to a single child: drop one level
            self.root = node_.keys[0]
            node_.keys[0].parent = None
            del node_
            return
        elif (len(node_.keys) < int(math.ceil(node_.order / 2)) and node_.check_leaf == False) or \
             (len(node_.values) < int(math.ceil((node_.order - 1) / 2)) and node_.check_leaf == True):

            is_predecessor = 0
            parentNode = node_.parent
            PrevNode = -1
            NextNode = -1
            PrevK = -1
            PostK = -1
            for i, item in enumerate(parentNode.keys):

                if item == node_:
                    if i > 0:
                        PrevNode = parentNode.keys[i - 1]
                        PrevK = parentNode.values[i - 1]
                    if i < len(parentNode.keys) - 1:
                        NextNode = parentNode.keys[i + 1]
                        PostK = parentNode.values[i]

            if PrevNode == -1:
                ndash = NextNode
                value_ = PostK
            elif NextNode == -1:
                is_predecessor = 1
                ndash = PrevNode
                value_ = PrevK
            else:
                if len(node_.values) + len(NextNode.values) < node_.order:
                    ndash = NextNode
                    value_ = PostK
                else:
                    is_predecessor = 1
                    ndash = PrevNode
                    value_ = PrevK

            if len(node_.values) + len(ndash.values) < node_.order:
                # merge node_ into its sibling
                if is_predecessor == 0:
                    node_, ndash = ndash, node_
                ndash.keys += node_.keys
                if not node_.check_leaf:
                    ndash.values.append(value_)
                else:
                    ndash.nextKey = node_.nextKey
                ndash.values += node_.values

                if not ndash.check_leaf:
                    for j in ndash.keys:
                        j.parent = ndash

                self.deleteEntry(node_.parent, value_, node_)
                del node_
            else:
                # redistribute: borrow one entry from the richer sibling
                if is_predecessor == 1:
                    if not node_.check_leaf:
                        ndashpm = ndash.keys.pop(-1)
                        ndashkm_1 = ndash.values.pop(-1)
                        node_.keys = [ndashpm] + node_.keys
                        node_.values = [value_] + node_.values
                        parentNode = node_.parent
                        for i, item in enumerate(parentNode.values):
                            if item == value_:
                                # BUGFIX: original wrote `p.values[i]`, but no
                                # name `p` exists here (NameError); the parent
                                # separator is what must be updated.
                                parentNode.values[i] = ndashkm_1
                                break
                    else:
                        ndashpm = ndash.keys.pop(-1)
                        ndashkm = ndash.values.pop(-1)
                        node_.keys = [ndashpm] + node_.keys
                        node_.values = [ndashkm] + node_.values
                        parentNode = node_.parent
                        # BUGFIX: original iterated `p.values` (undefined name);
                        # the loop must scan the parent's separator values.
                        for i, item in enumerate(parentNode.values):
                            if item == value_:
                                parentNode.values[i] = ndashkm
                                break
                else:
                    if not node_.check_leaf:
                        ndashp0 = ndash.keys.pop(0)
                        ndashk0 = ndash.values.pop(0)
                        node_.keys = node_.keys + [ndashp0]
                        node_.values = node_.values + [value_]
                        parentNode = node_.parent
                        for i, item in enumerate(parentNode.values):
                            if item == value_:
                                parentNode.values[i] = ndashk0
                                break
                    else:
                        ndashp0 = ndash.keys.pop(0)
                        ndashk0 = ndash.values.pop(0)
                        node_.keys = node_.keys + [ndashp0]
                        node_.values = node_.values + [ndashk0]
                        parentNode = node_.parent
                        for i, item in enumerate(parentNode.values):
                            if item == value_:
                                parentNode.values[i] = ndash.values[0]
                                break

                if not ndash.check_leaf:
                    for j in ndash.keys:
                        j.parent = ndash
                if not node_.check_leaf:
                    for j in node_.keys:
                        j.parent = node_
                if not parentNode.check_leaf:
                    for j in parentNode.keys:
                        j.parent = parentNode


# Print the tree.
# NOTE(review): this BFS never enqueues children, so it only prints the
# root's immediate children; preserved as-is to keep the output unchanged.
def printTree(tree):
    lst = [tree.root]
    level = [0]
    leaf = None
    flag = 0
    lev_leaf = 0

    # NOTE(review): unused; also passes a string as `order` — harmless here.
    node1 = Node(str(level[0]) + str(tree.root.values))

    while (len(lst) != 0):
        x = lst.pop(0)
        lev = level.pop(0)
        if (x.check_leaf == False):
            for i, item in enumerate(x.keys):
                print(item.values)
        else:
            for i, item in enumerate(x.keys):
                print(item.values)
            if (flag == 0):
                lev_leaf = lev
                leaf = x
                flag = 1


record_len = 3
bplustree = BplusTree(record_len)
bplustree.insert('5', '33')
bplustree.insert('15', '21')
bplustree.insert('25', '31')
bplustree.insert('35', '41')
bplustree.insert('45', '10')

printTree(bplustree)

if(bplustree.find('5', '34')):
    print("Found")
else:
    print("Not found")
# -
backend/sql/B+tree/B-tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tensorflow_p27] # language: python # name: conda-env-tensorflow_p27-py # --- # + # PROCESS FULL OPEN IMAGES V4 DATASET: # # Step #1 - parse the csv files and build a list of image ids for # specific class (e.g. dogs). import os import csv import shutil TARGET_LABEL = 'Dog' # modify this line IMAGE_LABELS = [ '../openimages_v4/train-annotations-human-imagelabels.csv', '../openimages_v4/validation-annotations-human-imagelabels.csv', '../openimages_v4/test-annotations-human-imagelabels.csv', ] label_names = {} # class label => name name_labels = {} # class name => label with open('../openimages_v4/class-descriptions.csv', mode='r') as infile: reader = csv.reader(infile) for rows in reader: label_names[rows[0]] = rows[1] name_labels[rows[1]] = rows[0] labelled_images = {} # label => images for csvfile in IMAGE_LABELS: with open(csvfile, mode='r') as infile: reader = csv.reader(infile) firstRow = True for rows in reader: if firstRow: # ignore first row firstRow = False continue if rows[3] != '1': # ignore negative labels continue labelled_images.setdefault(rows[2], set()).add(rows[0]) label = name_labels[TARGET_LABEL] images = labelled_images[label] print ('%d images found with \'%s\' label' % (len(images), TARGET_LABEL)) # + # PROCESS FULL OPEN IMAGES V4 DATASET: # # Step #2 - parse the csv files and build a list of image links for # all match image ids (e.g. dogs). Also, create a TSV file for using # with Google Cloud Storage Transfer. TARGET_TSV_FILE = 'all-dog-images.tsv' # modify this line IMAGE_URLS = [ '../openimages_v4/train-images-with-labels-with-rotation.csv', '../openimages_v4/validation-images-with-rotation.csv', '../openimages_v4/test-images-with-rotation.csv', ] image_urls = {} # image id => url tsvfile = open(TARGET_TSV_FILE, 'w') # tsv file. 
tsvfile.write('TsvHttpData-1.0\n') for csvfile in IMAGE_URLS: with open(csvfile, mode='r') as infile: reader = csv.reader(infile) firstRow = True for rows in reader: if firstRow: # ignore first row firstRow = False continue if rows[0] in images: image_urls[rows[0]] = rows[2] tsvfile.write('\t'.join([rows[2], rows[8], rows[9]]) + '\n') tsvfile.close() print ('%d image urls found with \'%s\' label' % (len(image_urls), TARGET_LABEL)) # + # PROCESS FULL OPEN IMAGES V4 DATASET: # # Step #3 - process the downloaded images and reorganize them # by their image ids. # modify these lines DOWNLOADS_FOLDER = '/home/ubuntu/datasets/openimages_all_dogs/downloads' PROCESSED_IMAGES = '/home/ubuntu/datasets/openimages_all_dogs/dog' for k,v in image_urls.iteritems(): path = v.replace('http://', '').replace('https://', '') path = os.path.join(DOWNLOADS_FOLDER, path) if os.path.exists(path): shutil.copyfile(path, os.path.join(PROCESSED_IMAGES, k + '.jpg')) else: print ('Image not found: %s' % k) print ('Done.') # + # PROCESS FULL OPEN IMAGES V4 DATASET: # # Rename all images by adding the class name as its prefix. 
import os import csv import shutil DATASET_PATH = './output_cdn_78k_v4/train/not_pet' CLASSES = ['Cat', 'Dog', 'Horse', 'Bird', 'Fish', 'Reptile', 'Frog', 'Hamster', 'Rabbit', 'Snail', 'Boy', 'Girl', 'Woman', 'Man', 'Baby', 'Cattle'] IMAGE_LABELS = [ '../openimages_v4/train-annotations-human-imagelabels.csv', '../openimages_v4/validation-annotations-human-imagelabels.csv', '../openimages_v4/test-annotations-human-imagelabels.csv', ] label_names = {} # class label => name name_labels = {} # class name => label with open('../openimages_v4/class-descriptions.csv', mode='r') as infile: reader = csv.reader(infile) for rows in reader: label_names[rows[0]] = rows[1] name_labels[rows[1]] = rows[0] labelled_images = {} # label => images for csvfile in IMAGE_LABELS: with open(csvfile, mode='r') as infile: reader = csv.reader(infile) firstRow = True for rows in reader: if firstRow: # ignore first row firstRow = False continue if rows[3] != '1': # ignore negative labels continue labelled_images.setdefault(rows[2], set()).add(rows[0] + '.jpg') for img in os.listdir(DATASET_PATH): prefix = None for cls in CLASSES: # search in pet classes firstly if img in labelled_images[name_labels[cls]]: prefix = cls.lower().replace(' ', '_').replace('&', '_') break if not prefix: for lb, imgs in labelled_images.iteritems(): cls = label_names[lb] if cls not in CLASSES and img in imgs: prefix = cls.lower().replace(' ', '_').replace('&', '_') break if prefix: os.rename(os.path.join(DATASET_PATH, img), os.path.join(DATASET_PATH, prefix + '_' + img)) #print (img + ' ' + prefix) else: print ('Not found: ' + img) print ('Done.') # - # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #1 - build up a simple 'class => images' mapping and then we will # create customized datasets like 'not vs not dog' upon it. 
import os import csv import shutil def _copy_files(images, dstfolder): if not os.path.exists(dstfolder): os.makedirs(dstfolder) for image in images: if os.path.exists(image): shutil.copy(image, dstfolder) else: print ('Image not found: %s' % image) print ('%d images copied to %s' % (len(images), dstfolder)) IMAGE_LABELS = { '../openimages_v4/training': '../openimages_v4/train-annotations-human-imagelabels-boxable.csv', '../openimages_v4/validation': '../openimages_v4/validation-annotations-human-imagelabels-boxable.csv', '../openimages_v4/testing': '../openimages_v4/test-annotations-human-imagelabels-boxable.csv', } label_names = {} # class label => name name_labels = {} # class name => label with open('../openimages_v4/class-descriptions-boxable.csv', mode='r') as infile: reader = csv.reader(infile) for rows in reader: label_names[rows[0]] = rows[1] name_labels[rows[1]] = rows[0] labelled_images = {} # label => images for folder, csvfile in IMAGE_LABELS.iteritems(): with open(csvfile, mode='r') as infile: reader = csv.reader(infile) firstRow = True for rows in reader: if firstRow: # ignore first row firstRow = False continue if rows[3] != '1': # ignore negative labels continue labelled_images.setdefault(rows[2], set()).add(os.path.join(folder, rows[0] + '.jpg')) # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #2.1 - make a 'dog' vs 'not dog' dataset by using all # reviewed dog images, and using class-weights when build the # not dog images, and then split train/val/test using pre- # defined precentages. import random import math DATASET_PATH = './output_dnd_69k_0918' TRAIN_VAL_SPLIT = 0.8 if os.path.exists(DATASET_PATH): shutil.rmtree(DATASET_PATH) os.makedirs(DATASET_PATH) # 1) copy all reviewed 'dog' images. total_dog_images = 0 folder = './output_dnd_accepted/dog' images = [os.path.join(folder,x) for x in os.listdir(folder)] random.shuffle(images) print ('%d dog images in Google dataset.' 
% len(images)) _copy_files(images[:int(len(images) * TRAIN_VAL_SPLIT)], os.path.join(DATASET_PATH, 'train', 'dog')) _copy_files(images[int(len(images) * TRAIN_VAL_SPLIT):], os.path.join(DATASET_PATH, 'val', 'dog')) total_dog_images += len(images) folder = './output_dnd_accepted/kaggle_dog' images = [os.path.join(folder,x) for x in os.listdir(folder)] random.shuffle(images) print ('%d dog images in Kaggle dataset.' % len(images)) _copy_files(images[:int(len(images) * TRAIN_VAL_SPLIT)], os.path.join(DATASET_PATH, 'train', 'dog')) _copy_files(images[int(len(images) * TRAIN_VAL_SPLIT):], os.path.join(DATASET_PATH, 'val', 'dog')) total_dog_images += len(images) # 2) copy 'not dog' images. class_weights = { 'Cat': .01, 'Horse': .01, 'Cattle': .01, 'Boy': .075, 'Woman': .075, 'Man': .075, 'Girl': .075, 'Baby': '.075|../openimages_all_babies/babies' } class_images = {} total_not_dog_images = total_dog_images # same as reviewed dog images. for name,weight in class_weights.iteritems(): if type(weight) is str: [weight, source] = weight.split('|') labelled_images[name] = set([os.path.join(source, x) for x in os.listdir(source)]) label_names[name] = name name_labels[name] = name class_images[name] = int(math.ceil(float(weight) * total_dog_images)) total_not_dog_images -= class_images[name] dog_label = name_labels['Dog'] avg_not_dog_images = int(math.ceil(total_not_dog_images * 1.0 / (len(labelled_images) - 1 - len(class_weights)))) + 2 for lb, imgs in labelled_images.iteritems(): if lb == dog_label: continue copy_total = 0 name = label_names[lb] if name in class_weights: copy_total = class_images[name] else: copy_total = avg_not_dog_images imgs = [img for img in imgs if img not in labelled_images[dog_label]] random.shuffle(imgs) imgs = imgs[:copy_total] print ('Copy \'%s\' images.' 
% name) _copy_files(imgs[:int(len(imgs) * TRAIN_VAL_SPLIT)], os.path.join(DATASET_PATH, 'train', 'not_dog')) _copy_files(imgs[int(len(imgs) * TRAIN_VAL_SPLIT):], os.path.join(DATASET_PATH, 'val', 'not_dog')) print ('Done.') # - # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #2.2 (outdated) - make a 'dog' vs 'not dog' dataset # by using all reviewed dog images, and split train/val/test # using pre-defined precentages. import random # make a 'dog' vs 'not dog' dataset. DATASET_PATH = './output_dnd_22k' TRAIN_SET = 0.7 VALID_SET = 0.2 TEST_SET = 0.1 # 0) prepare. if os.path.exists(DATASET_PATH): shutil.rmtree(DATASET_PATH) os.makedirs(DATASET_PATH) # # 1) copy 'dog' images. label = name_labels['Dog'] #images = list(labelled_images[label]) folder = './output_dnd_accepted/dog' images = [os.path.join(folder,x) for x in os.listdir(folder)] random.shuffle(images) print ('%d images found with \'Dog\'' % len(images)) _copy_files(images[:int(len(images) * 0.7)], os.path.join(DATASET_PATH, 'train', 'dog')) _copy_files(images[int(len(images) * 0.7):int(len(images) * 0.9)], os.path.join(DATASET_PATH, 'val', 'dog')) _copy_files(images[int(len(images) * 0.9):], os.path.join(DATASET_PATH, 'test', 'dog')) # 2) copy 'not dog' images. avg_not_dog = len(images) / (len(label_names) - 1) + 3 for lb, imgs in labelled_images.iteritems(): if lb != label: # not dog imgs = [img for img in imgs if img not in labelled_images[label]] # not dog random.shuffle(imgs) imgs = imgs[:avg_not_dog] # take first n images _copy_files(imgs[:int(len(imgs) * 0.7)], os.path.join(DATASET_PATH, 'train', 'not_dog')) _copy_files(imgs[int(len(imgs) * 0.7):int(len(imgs) * 0.9)], os.path.join(DATASET_PATH, 'val', 'not_dog')) _copy_files(imgs[int(len(imgs) * 0.9):], os.path.join(DATASET_PATH, 'test', 'not_dog')) print ('Done.') # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #2.3 (outdated) - make a 'dog' vs 'not dog' dataset # by using all dog images. # 0) prepare. 
if os.path.exists('./output_dnd'): shutil.rmtree('./output_dnd') os.makedirs('./output_dnd/dog') os.makedirs('./output_dnd/not_dog') # 1) copy 'dog' images. label = name_labels['Dog'] images = labelled_images[label] print ('%d images found with \'Dog\'' % len(images)) _copy_files(images, './output_dnd/dog') # 2) copy 'not dog' images. avg_not_dog = len(images) / (len(label_names) - 1) + 12 for lb, imgs in labelled_images.iteritems(): if lb != label: # not dog imgs = [img for img in imgs if img not in images] # not dog imgs = imgs[:avg_not_dog] # take first n images #print ('%d images found with \'%s\'' % (len(imgs), label_names[lb])) _copy_files(imgs, './output_dnd/not_dog') print ('Done.') # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #2.4 (outdated) - make a 'dog' vs 'not dog' dataset # by using only a few dog images, and a few not dog images # from a limited set of classes. import random # make a small 'dog' vs 'not dog' dataset. DATASET_PATH = './output_dnd_1k' DATASET_SIZE = 1000 TRAIN_SET = 0.7 VALID_SET = 0.2 TEST_SET = 0.1 # 0) prepare. if os.path.exists(DATASET_PATH): shutil.rmtree(DATASET_PATH) os.makedirs(DATASET_PATH) # 1) copy 'dog' images. label = name_labels['Dog'] images = list(labelled_images[label]) random.shuffle(images) images = images[:(DATASET_SIZE/2)] print ('%d images for \'Dog\'' % len(images)) _copy_files(images[:int(len(images) * 0.7)], os.path.join(DATASET_PATH, 'train', 'dog')) _copy_files(images[int(len(images) * 0.7):int(len(images) * 0.9)], os.path.join(DATASET_PATH, 'val', 'dog')) _copy_files(images[int(len(images) * 0.9):], os.path.join(DATASET_PATH, 'test', 'dog')) # 2) copy 'not_dog' images. 
not_dog = ["Human beard","Towel","Home appliance","Boy","Cat","Door","Scarf","Pillow","Woman","Horse","Sofa bed","Human hair","Curtain","Man","Girl","Pig","Loveseat","Television","Vehicle","Human face","Kitchen & dining room table","Human body","Sock","Desk","Chair"] avg_not_dog = DATASET_SIZE / 2 / len(not_dog) for cls in not_dog: lb = name_labels[cls] imgs = list(labelled_images[lb]) random.shuffle(imgs) imgs = imgs[:avg_not_dog] _copy_files(imgs[:int(len(imgs) * 0.7)], os.path.join(DATASET_PATH, 'train', 'not_dog')) _copy_files(imgs[int(len(imgs) * 0.7):int(len(imgs) * 0.9)], os.path.join(DATASET_PATH, 'val', 'not_dog')) _copy_files(imgs[int(len(imgs) * 0.9):], os.path.join(DATASET_PATH, 'test', 'not_dog')) print ('Done.') # + # PROCESS 601 CLASSES OPEN IMAGES V4 DATASET: # # Step #2.5 (outdated) - make a 'cat', dog', 'not pet' dataset # by using only a few images from a limited set of classes. import random # make a small 'cat', dog', 'not pet' dataset. DATASET_PATH = './output_cdn_1k' DATASET_SIZE = 1000 TRAIN_SET = 0.7 VALID_SET = 0.2 TEST_SET = 0.1 # 0) prepare. if os.path.exists(DATASET_PATH): shutil.rmtree(DATASET_PATH) os.makedirs(DATASET_PATH) # 1) copy 'dog' images. label = name_labels['Dog'] images = list(labelled_images[label]) random.shuffle(images) images = images[:(DATASET_SIZE/10*2)] print ('%d images for \'Dog\'' % len(images)) _copy_files(images[:int(len(images) * 0.7)], os.path.join(DATASET_PATH, 'train', 'dog')) _copy_files(images[int(len(images) * 0.7):int(len(images) * 0.9)], os.path.join(DATASET_PATH, 'val', 'dog')) _copy_files(images[int(len(images) * 0.9):], os.path.join(DATASET_PATH, 'test', 'dog')) # 2) copy 'cat' images. 
label = name_labels['Cat'] images = list(labelled_images[label]) random.shuffle(images) images = images[:(DATASET_SIZE/10*2)] print ('%d images for \'Cat\'' % len(images)) _copy_files(images[:int(len(images) * 0.7)], os.path.join(DATASET_PATH, 'train', 'cat')) _copy_files(images[int(len(images) * 0.7):int(len(images) * 0.9)], os.path.join(DATASET_PATH, 'val', 'cat')) _copy_files(images[int(len(images) * 0.9):], os.path.join(DATASET_PATH, 'test', 'cat')) # 3) copy 'not_dog' images. not_dog = ["Human beard","Towel","Home appliance","Boy","Door","Scarf","Pillow","Woman","Horse","Sofa bed","Human hair","Curtain","Man","Girl","Pig","Loveseat","Television","Vehicle","Human face","Human body"] avg_not_dog = DATASET_SIZE / 10 * 6 / len(not_dog) for cls in not_dog: lb = name_labels[cls] imgs = list(labelled_images[lb]) random.shuffle(imgs) imgs = imgs[:avg_not_dog] _copy_files(imgs[:int(len(imgs) * 0.7)], os.path.join(DATASET_PATH, 'train', 'not_pet')) _copy_files(imgs[int(len(imgs) * 0.7):int(len(imgs) * 0.9)], os.path.join(DATASET_PATH, 'val', 'not_pet')) _copy_files(imgs[int(len(imgs) * 0.9):], os.path.join(DATASET_PATH, 'test', 'not_pet')) print ('Done.') # -
openimages_utilities.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import time

time.time()

# chessboard inner-corner grid size (row x col)
col, row = 4, 5

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...
objp = np.zeros((col*row, 3), np.float32)
objp[:, :2] = np.mgrid[0:row, 0:col].T.reshape(-1, 2)

objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.

# +
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((col*row, 3), np.float32)
objp[:, :2] = np.mgrid[0:row, 0:col].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('/media/commaai-03/Data/tmp2/calibrationdata/*.png')

'''
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (6,4), None)
    print(ret)

    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        cv2.drawChessboardCorners(img, (6,4), corners, ret)
        #write_name = 'corners_found'+str(idx)+'.jpg'
        #cv2.imwrite(write_name, img)
        cv2.imshow('img', img)
        cv2.waitKey(500)

cv2.destroyAllWindows()
'''
pass
# -

# Collect corner detections from every calibration image.
# NOTE(review): here `img` is the image *file path* from glob, and `gray`
# is actually the BGR image loaded by cv2.imread (findChessboardCorners
# accepts color input).
for img in images:
    gray = cv2.imread(img)
    ret, corners = cv2.findChessboardCorners(gray, (row, col), None)
    if ret:
        objpoints.append(objp)
        imgpoints.append(corners)

# image size from the last loaded image — assumes all calibration images
# share one resolution (TODO confirm)
img_size = (gray.shape[1], gray.shape[0])

img_size

imgpoints

# Solve for the camera matrix and distortion coefficients.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

# BUGFIX: the original called cv2.undistort(img, ...), but after the loop
# `img` is the last *filename string*, not an image array; undistort the
# last loaded image instead.
dst = cv2.undistort(gray, mtx, dist, None, mtx)

mtx
ROS/Kinect2/checkBoard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Preprocessing
# ---

import pandas as pd
from sklearn.preprocessing import StandardScaler, LabelEncoder
import numpy as np

train = pd.read_csv("./data/raw/train.csv")
test = pd.read_csv("./data/raw/test.csv")

train.head()


# +
def holiday(x):
    """Flag weekdays adjacent to the weekend: Friday (the day before a
    holiday) -> 2, Monday (the day after a holiday) -> 1, otherwise 0."""
    if x == "金":    # Friday
        return 2
    elif x == "月":  # Monday
        return 1
    else:
        return 0


train["holiday"] = train.week.apply(holiday)
test["holiday"] = test.week.apply(holiday)

# Label-encode the weekday column. The encoder is fit on train only; test is
# transformed with the SAME fitted encoder. (BUG FIX: the original called
# fit_transform on test, silently re-fitting the encoder on test data.)
la = LabelEncoder()
la.fit(train.week)
train["week"] = la.transform(train.week)
test["week"] = la.transform(test.week)
print(la.classes_)

# +
train["kcal_isna"] = train.kcal.isnull().astype(int)
test["kcal_isna"] = test.kcal.isnull().astype(int)

# kcal differs by weekday, so impute missing values with the per-weekday mean
# computed on train (weekdays are label-encoded 0..4).
kcal_means = {w: train[train.week == w].loc[:, ["kcal"]].mean() for w in range(5)}
train["kcal"] = pd.concat(
    [train[train.week == w].loc[:, ["kcal"]].fillna(kcal_means[w]) for w in range(5)]
)
test["kcal"] = pd.concat(
    [test[test.week == w].loc[:, ["kcal"]].fillna(kcal_means[w]) for w in range(5)]
)
# -

# When remarks is present the menu is mostly curry: flag the three most
# frequent menu names among the "お楽しみメニュー" rows.
carry = train[train.remarks == "お楽しみメニュー"].name.value_counts()
carry = carry.index[:3]
train["carry"] = train.name.apply(lambda x: 1 if x in carry else 0)
test["carry"] = test.name.apply(lambda x: 1 if x in carry else 0)

# remarks is too sparse to keep as a feature; keep only a missingness flag.
train["remarks_isna"] = train.remarks.isnull().astype(int)
test["remarks_isna"] = test.remarks.isnull().astype(int)
train.drop(["remarks"], axis=1, inplace=True)
test.drop(["remarks"], axis=1, inplace=True)

train["event_isna"] = train.event.isnull().astype(int)
test["event_isna"] = test.event.isnull().astype(int)

# Label-encode event, treating missing values as the category "none".
la = LabelEncoder()
la.fit(train.event.fillna("none"))
train["event"] = la.transform(train.event.fillna("none"))
test["event"] = la.transform(test.event.fillna("none"))

# +
# Collapse the raw weather strings (ordered by train frequency) into three
# coarse buckets.
weather_type = train.weather.value_counts().index
weather_map = {weather_type[0]: 0, weather_type[1]: 1, weather_type[2]: 0,
               weather_type[3]: 1, weather_type[4]: 2, weather_type[5]: 2,
               weather_type[6]: 2}
train["weather"] = train.weather.map(weather_map)
test["weather"] = test.weather.map(weather_map)

# NOTE(review): shift(-1) takes the NEXT row's weather although the column is
# named "before1" — confirm the intended direction.
train["weather_before1"] = train.weather.shift(-1).fillna(0).astype(int)
test["weather_before1"] = test.weather.shift(-1).fillna(train.weather.iloc[-1]).astype(int)
# -

# "--" denotes no precipitation; treat it as 0.
train["precipitation"] = train.precipitation.apply(lambda x: 0 if x == "--" else x)
test["precipitation"] = test.precipitation.apply(lambda x: 0 if x == "--" else x)

# +
train["payday"] = train.payday.fillna(0)
train["payday"] = train.payday.astype(int)
test["payday"] = test.payday.fillna(0)
test["payday"] = test.payday.astype(int)

train["payday_before1"] = train.payday.shift(-1).fillna(0).astype(int)
train["payday_after1"] = train.payday.shift(1).fillna(0).astype(int)
test["payday_before1"] = test.payday.shift(-1).fillna(0).astype(int)
test["payday_after1"] = test.payday.shift(1).fillna(train.payday.iloc[-1]).astype(int)

# +
# Expand the datetime column into calendar features.
train["datetime"] = pd.to_datetime(train.datetime)
test["datetime"] = pd.to_datetime(test.datetime)

train["year"] = train.datetime.dt.year
train["month"] = train.datetime.dt.month
train["quarter"] = train.datetime.dt.quarter
train["day"] = train.datetime.dt.day
train["day"] = (train.day - 1) / (31 - 1)  # min-max scale day-of-month to [0, 1]

y_2013 = train.loc[train.year == 2013, "y"].mean()
y_2014 = train.loc[train.year == 2014, "y"].mean()

# Sales levels differ between the two years, so lag features are backfilled
# with a yearly mean.
# NOTE(review): the NaN produced by shift(-1) sits at the END of the frame
# (the 2014 side) yet is filled with the 2013 mean, and y_2014 is unused —
# confirm which mean was intended.
train["sale_before1"] = train.y.shift(-1).fillna(y_2013)
train["moving_avg"] = train.y.shift(-1).rolling(window=5).mean().fillna(y_2013)

test["year"] = test.datetime.dt.year
test["month"] = test.datetime.dt.month
test["quarter"] = test.datetime.dt.quarter
test["day"] = test.datetime.dt.day
test["day"] = (test.day - 1) / (31 - 1)

# +
# Temperature relative to the quarterly train average.
q_means = {q: train.loc[train.quarter == q, "temperature"].mean() for q in (1, 2, 3, 4)}
train["temperature_avg"] = pd.concat(
    [train.loc[train.quarter == q, "temperature"] / q_means[q] for q in (1, 2, 3, 4)]
)
# The test period falls in Q4, so scale by the Q4 mean.
test["temperature_avg"] = test.temperature / q_means[4]
# -

# Drop columns that are no longer needed.
train.drop(["name", "datetime"], axis=1, inplace=True)
test.drop(["name", "datetime"], axis=1, inplace=True)

# +
# Target-encode month.
from src.utils.category_transform import TargetEncoder

train_ = train.copy()
tag = TargetEncoder()
tag.fit(train.drop(["y"], axis=1), train[["y"]], col="month")
# NOTE(review): TargetEncoder.transform(test) returns BOTH frames here —
# verify this matches the project API.
train, test = tag.transform(test)
month_mean = train.loc[train.year == 2014, "month"].mean()
test["month"] = test.month.fillna(month_mean)
train["y"] = train_["y"]
# -

train.corr().style.background_gradient(cmap="coolwarm")

train.head()

train.to_csv("./data/processed/train.csv", index=False)
test.to_csv("./data/processed/test.csv", index=False)
notebook/0901.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Noisy vs noiseless NN, linear regression, and logistic regression

# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import dotenv
import pandas as pd
import mlflow
import plotly
import plotly.graph_objects as go
import plotly.express as px
import plotly.subplots
import plotly.io as pio
import typing
import os
import shutil
import sys
# -

EXPORT = False
SHOW_TITLES = not EXPORT
EXPORT_DIR_NAME = 'teaser_plot'
NN_EXPERIMENT_NAME = 'teaser_plot_nn'
NN_NOISELESS_EXPERIMENT_NAME = 'neural_networks_teaser_plot_noiseless'
LINREG_EXPERIMENT_NAME = 'teaser_plot_linreg'
LOGREG_EXPERIMENT_NAME = 'teaser_plot_logreg'

# +
# Load environment variables
dotenv.load_dotenv()

# Enable loading of the project module
MODULE_DIR = os.path.join(os.path.abspath(os.path.join(os.path.curdir, os.pardir)), 'src')
sys.path.append(MODULE_DIR)
# -

# %load_ext autoreload
# %autoreload 2
import interpolation_robustness as ir

# +
if EXPORT:
    EXPORT_DIR = os.path.join(ir.util.REPO_ROOT_DIR, 'logs', f'export_{EXPORT_DIR_NAME}')
    print('Using export directory', EXPORT_DIR)
    if os.path.exists(EXPORT_DIR):
        shutil.rmtree(EXPORT_DIR)
    os.makedirs(EXPORT_DIR)


def export_fig(fig: plt.Figure, filename: str):
    """Save fig under EXPORT_DIR; no-op when export is disabled."""
    if EXPORT:
        export_path = os.path.join(EXPORT_DIR, filename)
        fig.savefig(export_path)
        print('Exported figure at', export_path)


# +
FIGURE_SIZE = (1.8, 1.7)
LEGEND_FIGURE_SIZE_NN = (1.8, 0.5)
LEGEND_FIGURE_SIZE_OTHERS = (2.4, 0.5)
LEGEND_FONT_SIZE = ir.plots.FONT_SIZE_SMALL_PT
AXIS_LABEL_FONT_SIZE = ir.plots.FONT_SIZE_SMALL_PT

ir.plots.setup_matplotlib(show_titles=SHOW_TITLES)
# -

y_axis_label = r'Risk($\lambda \to 0$) - Risk($\lambda_{\textnormal{opt}}$)'

# ## Neural networks on noiseless MNIST

# +
client = mlflow.tracking.MlflowClient()
nn_experiment = client.get_experiment_by_name(NN_EXPERIMENT_NAME)
nn_runs = mlflow.search_runs(
    nn_experiment.experiment_id
)
nn_runs = nn_runs.set_index('run_id', drop=False)  # set index, but keep column to not break stuff depending on it

# Convert number of MLP units to integer since they are stored as "[NUM_UNITS]" and sort by them
nn_runs['params.mlp_units'] = nn_runs['params.mlp_units'].str.strip('[] \t').astype(int)
nn_runs = nn_runs.sort_values(['params.mlp_units'])
print('Loaded', len(nn_runs), 'runs of experiment', NN_EXPERIMENT_NAME)
assert nn_runs['status'].eq('FINISHED').all()

target_metrics = ('test_std_accuracy', 'test_robust_accuracy')
metrics = ir.mlflow.load_metrics(nn_runs, target_metrics, client)
print('Loaded', len(target_metrics), 'metrics of', len(metrics.keys()), 'runs of experiment', NN_EXPERIMENT_NAME)

AVERAGE_OVER_LAST_K = 10
best_std_accuracies = ir.plots.find_best_metrics(metrics, 'test_std_accuracy', maximize=True)
best_robust_accuracies = ir.plots.find_best_metrics(metrics, 'test_robust_accuracy', maximize=True)
last_std_accuracies = ir.plots.find_last_metrics(metrics, 'test_std_accuracy', average_over=AVERAGE_OVER_LAST_K)
last_robust_accuracies = ir.plots.find_last_metrics(metrics, 'test_robust_accuracy', average_over=AVERAGE_OVER_LAST_K)

# Per-run error = 1 - accuracy; gap = mean of last-K errors minus best error.
current_std_metrics = np.zeros((len(nn_runs), AVERAGE_OVER_LAST_K))
current_adv_metrics = np.zeros((len(nn_runs), AVERAGE_OVER_LAST_K))
current_std_best = np.zeros((len(nn_runs),))
current_adv_best = np.zeros((len(nn_runs),))
for idx, run_id in enumerate(nn_runs['run_id']):
    current_std_metrics[idx, :] = 1.0 - last_std_accuracies[run_id]
    current_adv_metrics[idx, :] = 1.0 - last_robust_accuracies[run_id]
    current_std_best[idx] = 1.0 - best_std_accuracies[run_id]
    current_adv_best[idx] = 1.0 - best_robust_accuracies[run_id]
nn_std_gaps = np.mean(current_std_metrics, axis=-1) - current_std_best
nn_adv_gaps = np.mean(current_adv_metrics, axis=-1) - current_adv_best

# +
client = mlflow.tracking.MlflowClient()
nn_noiseless_experiment = client.get_experiment_by_name(NN_NOISELESS_EXPERIMENT_NAME)
nn_noiseless_runs = mlflow.search_runs(
    nn_noiseless_experiment.experiment_id
)
nn_noiseless_runs = nn_noiseless_runs.set_index('run_id', drop=False)  # set index, but keep column to not break stuff depending on it

# Convert number of MLP units to integer since they are stored as "[NUM_UNITS]" and sort by them
nn_noiseless_runs['params.mlp_units'] = nn_noiseless_runs['params.mlp_units'].str.strip('[] \t').astype(int)
nn_noiseless_runs = nn_noiseless_runs.sort_values(['params.mlp_units'])
print('Loaded', len(nn_noiseless_runs), 'runs of experiment', NN_NOISELESS_EXPERIMENT_NAME)
assert nn_noiseless_runs['status'].eq('FINISHED').all()

metrics = ir.mlflow.load_metrics(nn_noiseless_runs, target_metrics, client)
print('Loaded', len(target_metrics), 'metrics of', len(metrics.keys()), 'runs of experiment', NN_NOISELESS_EXPERIMENT_NAME)

AVERAGE_OVER_LAST_K = 10
best_std_accuracies = ir.plots.find_best_metrics(metrics, 'test_std_accuracy', maximize=True)
best_robust_accuracies = ir.plots.find_best_metrics(metrics, 'test_robust_accuracy', maximize=True)
last_std_accuracies = ir.plots.find_last_metrics(metrics, 'test_std_accuracy', average_over=AVERAGE_OVER_LAST_K)
last_robust_accuracies = ir.plots.find_last_metrics(metrics, 'test_robust_accuracy', average_over=AVERAGE_OVER_LAST_K)

current_std_metrics = np.zeros((len(nn_noiseless_runs), AVERAGE_OVER_LAST_K))
current_adv_metrics = np.zeros((len(nn_noiseless_runs), AVERAGE_OVER_LAST_K))
current_std_best = np.zeros((len(nn_noiseless_runs),))
current_adv_best = np.zeros((len(nn_noiseless_runs),))
for idx, run_id in enumerate(nn_noiseless_runs['run_id']):
    current_std_metrics[idx, :] = 1.0 - last_std_accuracies[run_id]
    current_adv_metrics[idx, :] = 1.0 - last_robust_accuracies[run_id]
    current_std_best[idx] = 1.0 - best_std_accuracies[run_id]
    current_adv_best[idx] = 1.0 - best_robust_accuracies[run_id]
nn_noiseless_std_gaps = np.mean(current_std_metrics, axis=-1) - current_std_best
nn_noiseless_adv_gaps = np.mean(current_adv_metrics, axis=-1) - current_adv_best
# -

noiseless_cycle_idx = 0
noisy_cycle_idx = 1

# +
fig, ax = plt.subplots(figsize=FIGURE_SIZE)
ax.plot(
    nn_noiseless_runs['params.mlp_units'],
    nn_noiseless_adv_gaps,
    label=r'Pruned data',
    ls=ir.plots.LINESTYLE_MAP[noiseless_cycle_idx],
    c=f'C{noiseless_cycle_idx}',
    marker=ir.plots.MARKER_MAP[noiseless_cycle_idx]
)
ax.plot(
    nn_runs['params.mlp_units'],
    nn_adv_gaps,
    label=r'Original data',
    ls=ir.plots.LINESTYLE_MAP[noisy_cycle_idx],
    c=f'C{noisy_cycle_idx}',
    marker=ir.plots.MARKER_MAP[noisy_cycle_idx]
)
ax.set_xlabel('Number of hidden units', fontsize=AXIS_LABEL_FONT_SIZE)
ax.set_ylabel('Robust accuracy gain', fontsize=AXIS_LABEL_FONT_SIZE)
ax.set_ylim(bottom=0, top=0.05)
ax.set_xlim(left=0)
xticks = (0, 2000, 4000, 6000, 8000, 10000)
ax.set_xticks(xticks)
ax.set_xticklabels(
    f'{val // 1000}k' if val > 0 else '0' for val in xticks
)
if SHOW_TITLES:
    fig.suptitle('NN')
export_fig(fig, f'teaser_plot_nn.pdf')
plt.show()

# +
# Legend
legend_fig = plt.figure(figsize=LEGEND_FIGURE_SIZE_NN)
handles, labels = ax.get_legend_handles_labels()
legend_fig.legend(
    handles,
    labels,
    loc='center',
    ncol=2,
    mode='expand',
    frameon=True,
    fontsize=LEGEND_FONT_SIZE,
    borderpad=0.5
)
export_fig(legend_fig, f'teaser_plot_nn_legend.pdf')
# -

# ## Linear regression ST

# +
client = mlflow.tracking.MlflowClient()
linreg_experiment = client.get_experiment_by_name(LINREG_EXPERIMENT_NAME)
linreg_runs = mlflow.search_runs(
    linreg_experiment.experiment_id
)
linreg_runs = linreg_runs.set_index('run_id', drop=False)  # set index, but keep column to not break stuff depending on it

# Convert some parameters to numbers and sort accordingly.
# BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented replacement.
linreg_runs['params.data_dim'] = linreg_runs['params.data_dim'].astype(int)
linreg_runs['params.test_attack_epsilon'] = linreg_runs['params.test_attack_epsilon'].astype(float)
linreg_runs['params.l2_lambda'] = linreg_runs['params.l2_lambda'].astype(float)
linreg_runs['params.data_gaussian_noise_variance'] = linreg_runs['params.data_gaussian_noise_variance'].astype(float)
linreg_runs = linreg_runs.sort_values(['params.data_dim', 'params.l2_lambda', 'params.data_gaussian_noise_variance'], ascending=True)
print('Loaded', len(linreg_runs), 'runs of experiment', LINREG_EXPERIMENT_NAME)
assert linreg_runs['status'].eq('FINISHED').all()

# +
num_samples, = linreg_runs['params.num_train_samples'].astype(int).unique()
nonoise, covariate_noise = np.sort(linreg_runs['params.data_gaussian_noise_variance'].unique())
assert nonoise == 0 and covariate_noise > 0

linreg_noiseless_runs = linreg_runs[linreg_runs['params.data_gaussian_noise_variance'] == 0]
linreg_noise_runs = linreg_runs[linreg_runs['params.data_gaussian_noise_variance'] > 0]

linreg_noreg_noiseless_runs = linreg_noiseless_runs[linreg_noiseless_runs['params.l2_lambda'] == 0]
linreg_bestreg_noiseless_runs = linreg_noiseless_runs.groupby('params.data_dim').aggregate({'metrics.true_std_risk': 'min', 'metrics.true_robust_risk': 'min'})
linreg_noreg_noise_runs = linreg_noise_runs[linreg_noise_runs['params.l2_lambda'] == 0]
linreg_bestreg_noise_runs = linreg_noise_runs.groupby('params.data_dim').aggregate({'metrics.true_std_risk': 'min', 'metrics.true_robust_risk': 'min'})

# Gap = risk of the unregularized run minus the best risk over lambdas.
linreg_noiseless_std_gaps = linreg_noreg_noiseless_runs['metrics.true_std_risk'].values - linreg_bestreg_noiseless_runs['metrics.true_std_risk'].values
linreg_noiseless_robust_gaps = linreg_noreg_noiseless_runs['metrics.true_robust_risk'].values - linreg_bestreg_noiseless_runs['metrics.true_robust_risk'].values
linreg_noise_std_gaps = linreg_noreg_noise_runs['metrics.true_std_risk'].values - linreg_bestreg_noise_runs['metrics.true_std_risk'].values
linreg_noise_robust_gaps = linreg_noreg_noise_runs['metrics.true_robust_risk'].values - linreg_bestreg_noise_runs['metrics.true_robust_risk'].values

# +
fig, ax = plt.subplots(figsize=FIGURE_SIZE)
ax.plot(
    linreg_noreg_noiseless_runs['params.data_dim'].unique() / float(num_samples),
    linreg_noiseless_robust_gaps,
    label=r'Without noise',
    ls=ir.plots.LINESTYLE_MAP[noiseless_cycle_idx],
    marker=ir.plots.MARKER_MAP[noiseless_cycle_idx],
    c=f'C{noiseless_cycle_idx}'
)
ax.plot(
    linreg_noreg_noise_runs['params.data_dim'].unique() / float(num_samples),
    linreg_noise_robust_gaps,
    label=r'With noise',
    ls=ir.plots.LINESTYLE_MAP[noisy_cycle_idx],
    marker=ir.plots.MARKER_MAP[noisy_cycle_idx],
    c=f'C{noisy_cycle_idx}'
)
ax.set_xlabel('d/n', fontsize=AXIS_LABEL_FONT_SIZE)
ax.set_ylabel(y_axis_label, fontsize=AXIS_LABEL_FONT_SIZE)
xticks = (0, 2, 4, 6, 8, 10)
ax.set_xticks(xticks)
ax.set_ylim(bottom=0.0, top=0.5)
ax.set_xlim(left=0)
if SHOW_TITLES:
    fig.suptitle('Linear regression')
export_fig(fig, f'teaser_plot_linreg.pdf')
plt.show()
# -

# ## Logistic regression with consistent attacks

# +
client = mlflow.tracking.MlflowClient()
logreg_experiment = client.get_experiment_by_name(LOGREG_EXPERIMENT_NAME)
logreg_runs = mlflow.search_runs(
    logreg_experiment.experiment_id
)
logreg_runs = logreg_runs.set_index('run_id', drop=False)  # set index, but keep column to not break stuff depending on it

# Convert some parameters to numbers and sort accordingly (same np.float fix).
logreg_runs['params.data_dim'] = logreg_runs['params.data_dim'].astype(int)
logreg_runs['params.train_attack_epsilon'] = logreg_runs['params.train_attack_epsilon'].astype(float)
logreg_runs['params.test_attack_epsilon'] = logreg_runs['params.test_attack_epsilon'].astype(float)
logreg_runs['params.l2_lambda'] = logreg_runs['params.l2_lambda'].astype(float)
logreg_runs['params.label_noise'] = logreg_runs['params.label_noise'].astype(float)
logreg_runs = logreg_runs.sort_values(['params.data_dim', 'params.l2_lambda'])
print('Loaded', len(logreg_runs), 'runs of experiment', LOGREG_EXPERIMENT_NAME)
assert logreg_runs['status'].eq('FINISHED').all()

# +
num_samples, = logreg_runs['params.data_num_train_samples'].astype(int).unique()
nonoise, label_noise = np.sort(logreg_runs['params.label_noise'].unique())
assert nonoise == 0 and label_noise > 0

logreg_noiseless_runs = logreg_runs[logreg_runs['params.label_noise'] == 0]
logreg_noise_runs = logreg_runs[logreg_runs['params.label_noise'] > 0]

logreg_noreg_noiseless_runs = logreg_noiseless_runs[logreg_noiseless_runs['params.l2_lambda'] == 0]
logreg_bestreg_noiseless_runs = logreg_noiseless_runs.groupby('params.data_dim').aggregate({'metrics.true_std_risk': 'min', 'metrics.true_robust_risk': 'min'})
logreg_noreg_noise_runs = logreg_noise_runs[logreg_noise_runs['params.l2_lambda'] == 0]
logreg_bestreg_noise_runs = logreg_noise_runs.groupby('params.data_dim').aggregate({'metrics.true_std_risk': 'min', 'metrics.true_robust_risk': 'min'})

logreg_noiseless_std_gaps = logreg_noreg_noiseless_runs['metrics.true_std_risk'].values - logreg_bestreg_noiseless_runs['metrics.true_std_risk'].values
logreg_noiseless_robust_gaps = logreg_noreg_noiseless_runs['metrics.true_robust_risk'].values - logreg_bestreg_noiseless_runs['metrics.true_robust_risk'].values
logreg_noise_std_gaps = logreg_noreg_noise_runs['metrics.true_std_risk'].values - logreg_bestreg_noise_runs['metrics.true_std_risk'].values
logreg_noise_robust_gaps = logreg_noreg_noise_runs['metrics.true_robust_risk'].values - logreg_bestreg_noise_runs['metrics.true_robust_risk'].values

# +
fig, ax = plt.subplots(figsize=FIGURE_SIZE)
ax.plot(
    logreg_noreg_noiseless_runs['params.data_dim'].unique() / float(num_samples),
    logreg_noiseless_robust_gaps,
    label=r'Without noise',
    ls=ir.plots.LINESTYLE_MAP[noiseless_cycle_idx],
    marker=ir.plots.MARKER_MAP[noiseless_cycle_idx],
    c=f'C{noiseless_cycle_idx}'
)
ax.plot(
    logreg_noreg_noise_runs['params.data_dim'].unique() / float(num_samples),
    logreg_noise_robust_gaps,
    label=r'With noise',
    ls=ir.plots.LINESTYLE_MAP[noisy_cycle_idx],
    marker=ir.plots.MARKER_MAP[noisy_cycle_idx],
    c=f'C{noisy_cycle_idx}'
)
ax.set_xlabel('d/n', fontsize=AXIS_LABEL_FONT_SIZE)
ax.set_ylabel(y_axis_label, fontsize=AXIS_LABEL_FONT_SIZE)
xticks = (0, 2, 4, 6, 8, 10)
ax.set_xticks(xticks)
ax.set_ylim(bottom=0.0)
ax.set_xlim(left=0)
if SHOW_TITLES:
    fig.suptitle('Logistic regression')
export_fig(fig, f'teaser_plot_logreg.pdf')
plt.show()

# +
# Legend
legend_fig = plt.figure(figsize=LEGEND_FIGURE_SIZE_OTHERS)
handles, labels = ax.get_legend_handles_labels()
legend_fig.legend(
    handles,
    labels,
    loc='center',
    ncol=2,
    mode='expand',
    frameon=True,
    fontsize=LEGEND_FONT_SIZE,
    borderpad=0.5
)
export_fig(legend_fig, f'teaser_plot_other_legend.pdf')
teaser_plots/teaser_plot_full.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# +
import os
import os.path as Path

# +
data_dir = Path.join('..', 'data')
url = Path.join(data_dir, 'raw', 'hvc_annotations.csv')
url

# +
# Read the annotations. (The original wrapped this in `try: ... except: raise`,
# which is a no-op: a failed read should simply raise.)
df = pd.read_csv(url)
df.head(2)

# +
df.drop('filename', axis=1, inplace=True)

# +
# Rewrite the image paths to be relative to the local raw-data directory,
# keeping the originals for reference until the visual check below.
df['old_image_path'] = df['image_path']
df['image_path'] = df['image_path'].apply(lambda x: Path.join(data_dir, 'raw', x))

# +
df.head(2)

# +
from IPython.display import Image
Image(df.image_path[0])

# +
Image(df.image_path[100])

# +
df.drop('old_image_path', axis=1, inplace=True)

# +
df.info()

# + [markdown]
# No null values

# +
df.describe()

# +
# All remaining columns except the path are categorical labels.
categorical_df = df.drop('image_path', axis=1)

# +
categorical_df.describe().T

# +
categ_labels = categorical_df.columns
for label in categ_labels:
    print(label.ljust(10), '\t : ', categorical_df[label].unique())

# +
# Plots to analyze the distribution of data in each category
for i, col in enumerate(categorical_df):
    plt.figure(i)
    sns.countplot(y=categorical_df[col])

# +
cat_one_hot_df = pd.concat([pd.get_dummies(df[label], prefix=label) for label in categ_labels], axis=1)
cat_one_hot_df.head(2)

# + [markdown]
# ### Saving one_hot_encoded df and updated file path

# +
cat_one_hot_df.describe().T

# +
sns.heatmap(cat_one_hot_df.corr(), annot=True)
plt.show()

# + [markdown]
# With 27 one-hot columns this correlation heat map is incomprehensible.
# Categorical correlations (end of the notebook) were tried briefly; they do
# not seem to matter for the task at hand — references are kept below as a
# starting point for revisiting.

# + [markdown]
# # Saving processed data to file

# +
preprocessed_df = pd.concat([df["image_path"], cat_one_hot_df], axis=1)
preprocessed_df.head(2)

# +
output = Path.join(data_dir, 'interim', 'preprocessed_data.csv')

# +
preprocessed_df.to_csv(output, index=False, columns=preprocessed_df.columns, mode='w')

# +
# Validate the round trip by re-reading the file just written.
vdf = pd.read_csv(output)
vdf.head(2)

# + [markdown]
# ### Categorical Correlations

# +
# Cramér's V reference (not used here):
# https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9

# +
# Chi-square based association between the categorical columns:
# https://stackoverflow.com/a/48035423/7445772
from scipy.stats import chisquare

df1 = categorical_df.apply(lambda x: pd.factorize(x)[0]) + 1
corr = pd.DataFrame([chisquare(df1[x].values, f_exp=df1.values.T, axis=1)[0] for x in df1])
corr.columns = categ_labels
corr.index = categ_labels

# +
sns.heatmap(corr, annot=True)
plt.show()
Week5/PersonAttributes/notebooks/Preprocessing and EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
# %matplotlib inline
from matplotlib import pyplot as plt, cm
from mpl_toolkits.mplot3d import Axes3D
from numba import cuda
from numba import *
# -


def plot_contour(x, y, p, u, v):
    """Quiver plot of the velocity field, sampled at every 5th grid point."""
    fig = plt.figure(figsize=(11, 7), dpi=100)
    X, Y = np.meshgrid(x, y)
    # Velocity field (the strides below restore the `::5` slices that were
    # garbled in the original source)
    plt.quiver(X[::5, ::5], Y[::5, ::5], u[::5, ::5], v[::5, ::5])
    plt.xlabel('X')
    plt.ylabel('Y')


@jit(parallel=True, cache=True)
def buildB(b, rho, dt, u, v, dx, dy):
    """Assemble the source term of the pressure Poisson equation.

    The channel is periodic in x, so the first and last columns use
    wrapped central differences.
    """
    dy2 = dy**2
    dx2 = dx**2
    dx2y2 = dy2 * dx2
    sumdx2y2 = dy2 + dx2

    # Interior points
    b[1:-1, 1:-1] = dx2y2 / (2 * (sumdx2y2)) * (rho * (
        1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx)
                  + (v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))
        - ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2
        - 2 * ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy)
               * (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx))
        - ((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))

    # Periodic BC pressure @ x = 2
    b[1:-1, -1] = dx2y2 / (2 * (sumdx2y2)) * (rho * (
        1 / dt * ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx)
                  + (v[2:, -1] - v[0:-2, -1]) / (2 * dy))
        - ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx))**2
        - 2 * ((u[2:, -1] - u[0:-2, -1]) / (2 * dy)
               * (v[1:-1, 0] - v[1:-1, -2]) / (2 * dx))
        - ((v[2:, -1] - v[0:-2, -1]) / (2 * dy))**2))

    # Periodic BC pressure @ x = 0
    # BUG FIX: the time-derivative term read u[1:-1, -14]; the periodic
    # neighbour of column 0 is column -1, as every other term here uses.
    b[1:-1, 0] = dx2y2 / (2 * (sumdx2y2)) * (rho * (
        1 / dt * ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx)
                  + (v[2:, 0] - v[0:-2, 0]) / (2 * dy))
        - ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx))**2
        - 2 * ((u[2:, 0] - u[0:-2, 0]) / (2 * dy)
               * (v[1:-1, 1] - v[1:-1, -1]) / (2 * dx))
        - ((v[2:, 0] - v[0:-2, 0]) / (2 * dy))**2))
    return b


cuda.detect()


def pfunc(pnx1, pnxm1, pny1, pnym1, bxy, dx2, dy2, sumdx2dy2):
    """Single Jacobi update of the pressure at one grid point.

    BUG FIX: the original subtracted an undefined name `b`; the source
    term for this point is the parameter `bxy`.
    """
    pxy = (((pnx1 + pnxm1) * dy2 + (pny1 + pnym1) * dx2) / (2 * sumdx2dy2)) - bxy
    return pxy


pgpu = cuda.jit(device=True)(pfunc)


# +
@cuda.jit('void(float32[:,:], float32[:,:], float32[:,:], float32, float32)')
def pres_poisson(p, pn, b, dx, dy):
    """Iterate the pressure Poisson equation (50 fixed sweeps) on the GPU.

    Walls at y = 0, 2 use dp/dy = 0; x is periodic. Threads cover the grid
    with strides of the total launch extent in each dimension.
    """
    dy2 = dy**2
    dx2 = dx**2
    # BUG FIX: was `dy2 + dx` — the denominator of the 5-point stencil is
    # the sum of the SQUARED spacings, as used in buildB and pfunc.
    sumdx2dy2 = dy2 + dx2
    height = p.shape[0]
    width = p.shape[1]

    startX, startY = cuda.grid(2)
    gridX = cuda.gridDim.x * cuda.blockDim.x
    gridY = cuda.gridDim.y * cuda.blockDim.y

    for i in range(50):
        # NOTE(review): `pn = p` only aliases p rather than snapshotting it,
        # so updates read partially-updated neighbours (and race across
        # threads) — confirm whether a true copy was intended.
        pn = p
        for x in range((startX + 1), (width - 1), gridX):
            for y in range((startY + 1), (height - 1), gridY):
                p[y, x] = (((pn[y, (x + 1)] + pn[y, (x - 1)]) * dy2
                            + (pn[(y + 1), x] + pn[(y - 1), x]) * dx2)
                           / (2 * sumdx2dy2)) - b[y, x]
            # Wall BC pressure
            p[0, x] = p[1, x]    # dp/dy = 0 at y = 0
            p[-1, x] = p[-2, x]  # dp/dy = 0 at y = 2
        # Periodic BC pressure @ x = 2 and x = 0
        # NOTE(review): y runs over the full height here, so pn[y + 1] reads
        # one row past the array at y = height - 1 — confirm the bounds.
        for y in range((startY), (height), gridY):
            p[y, -1] = (((pn[y, 0] + pn[y, -2]) * dy2
                         + (pn[(y + 1), -1] + pn[(y - 1), -1]) * dx2)
                        / (2 * (sumdx2dy2)) - b[y, -1])
            p[y, 0] = (((pn[y, 1] + pn[y, -1]) * dy2
                        + (pn[(y + 1), 0] + pn[(y - 1), 0]) * dx2)
                       / (2 * (sumdx2dy2)) - b[y, 0])


# +
def channel_flow(udiff_target, F, u, v, dt, dx, dy, p, b, rho, nu):
    """Advance the channel-flow solution until the relative change in the
    total u-momentum drops below udiff_target. Returns (u, v, p)."""
    udiff = 10
    stepcount = 0
    un = np.empty_like(u)
    vn = np.empty_like(v)
    pn = np.empty_like(p)

    # Precomputed time-step ratios for the interior update
    dtx = dt / dx
    dty = dt / dy
    dtx2 = dt / dx**2
    dty2 = dt / dy**2

    while udiff > udiff_target:
        un = u.copy()
        vn = v.copy()

        b = buildB(b, rho, dt, u, v, dx, dy)

        blockdim = (9, 9)
        griddim = (11, 11)
        # NOTE(review): the device copies below are never passed to the
        # kernel — pres_poisson is launched on the host arrays, so the
        # to_device/to_host pairs are dead work. Kept for behavioural parity.
        d_b = cuda.to_device(b)
        d_p = cuda.to_device(p)
        d_pn = cuda.to_device(pn)
        pres_poisson[griddim, blockdim](p, pn, b, dx, dy)
        d_b.to_host()
        d_p.to_host()
        d_pn.to_host()

        # Interior momentum update (forcing F on u only)
        u[1:-1, 1:-1] = (un[1:-1, 1:-1]
                         - un[1:-1, 1:-1] * dtx * (un[1:-1, 1:-1] - un[1:-1, 0:-2])
                         - vn[1:-1, 1:-1] * dty * (un[1:-1, 1:-1] - un[0:-2, 1:-1])
                         - dtx / (2 * rho) * (p[1:-1, 2:] - p[1:-1, 0:-2])
                         + nu * (dtx2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2])
                                 + dty2 * (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))
                         + F * dt)

        v[1:-1, 1:-1] = (vn[1:-1, 1:-1]
                         - un[1:-1, 1:-1] * dtx * (vn[1:-1, 1:-1] - vn[1:-1, 0:-2])
                         - vn[1:-1, 1:-1] * dty * (vn[1:-1, 1:-1] - vn[0:-2, 1:-1])
                         - dty / (2 * rho) * (p[2:, 1:-1] - p[0:-2, 1:-1])
                         + nu * (dtx2 * (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2])
                                 + dty2 * (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))

        # Periodic BC u @ x = 2
        u[1:-1, -1] = (un[1:-1, -1]
                       - un[1:-1, -1] * dt / dx * (un[1:-1, -1] - un[1:-1, -2])
                       - vn[1:-1, -1] * dt / dy * (un[1:-1, -1] - un[0:-2, -1])
                       - dt / (2 * rho * dx) * (p[1:-1, 0] - p[1:-1, -2])
                       + nu * (dt / dx**2 * (un[1:-1, 0] - 2 * un[1:-1, -1] + un[1:-1, -2])
                               + dt / dy**2 * (un[2:, -1] - 2 * un[1:-1, -1] + un[0:-2, -1]))
                       + F * dt)

        # Periodic BC u @ x = 0
        u[1:-1, 0] = (un[1:-1, 0]
                      - un[1:-1, 0] * dt / dx * (un[1:-1, 0] - un[1:-1, -1])
                      - vn[1:-1, 0] * dt / dy * (un[1:-1, 0] - un[0:-2, 0])
                      - dt / (2 * rho * dx) * (p[1:-1, 1] - p[1:-1, -1])
                      + nu * (dt / dx**2 * (un[1:-1, 1] - 2 * un[1:-1, 0] + un[1:-1, -1])
                              + dt / dy**2 * (un[2:, 0] - 2 * un[1:-1, 0] + un[0:-2, 0]))
                      + F * dt)

        # Periodic BC v @ x = 2
        v[1:-1, -1] = (vn[1:-1, -1]
                       - un[1:-1, -1] * dt / dx * (vn[1:-1, -1] - vn[1:-1, -2])
                       - vn[1:-1, -1] * dt / dy * (vn[1:-1, -1] - vn[0:-2, -1])
                       - dt / (2 * rho * dy) * (p[2:, -1] - p[0:-2, -1])
                       + nu * (dt / dx**2 * (vn[1:-1, 0] - 2 * vn[1:-1, -1] + vn[1:-1, -2])
                               + dt / dy**2 * (vn[2:, -1] - 2 * vn[1:-1, -1] + vn[0:-2, -1])))

        # Periodic BC v @ x = 0
        v[1:-1, 0] = (vn[1:-1, 0]
                      - un[1:-1, 0] * dt / dx * (vn[1:-1, 0] - vn[1:-1, -1])
                      - vn[1:-1, 0] * dt / dy * (vn[1:-1, 0] - vn[0:-2, 0])
                      - dt / (2 * rho * dy) * (p[2:, 0] - p[0:-2, 0])
                      + nu * (dt / dx**2 * (vn[1:-1, 1] - 2 * vn[1:-1, 0] + vn[1:-1, -1])
                              + dt / dy**2 * (vn[2:, 0] - 2 * vn[1:-1, 0] + vn[0:-2, 0])))

        # Wall BC: u, v = 0 @ y = 0, 2
        u[0, :] = 0
        u[-1, :] = 0
        v[0, :] = 0
        v[-1, :] = 0

        udiff = (np.sum(u) - np.sum(un)) / np.sum(u)
        stepcount += 1

    print(stepcount)
    print(stepcount * dt)
    return u, v, p


# -
def chn_sim_run(udiff_target):
    """Set up the grid/forcing, run the channel flow to convergence, plot the
    fields, and return the solution plus a global error against the analytic
    Poiseuille profile."""
    nx = 101
    ny = nx
    dx = 2 / (nx - 1)
    dy = 2 / (ny - 1)
    x = np.linspace(0, 2, nx)
    y = np.linspace(0, 2, ny)

    rho = 1
    nu = .1
    F = 0.0798
    sigma = .5
    dt = sigma * dx * dy / nu  # diffusive stability-style time step

    u = np.zeros((ny, nx))
    v = np.zeros((ny, nx))
    p = np.zeros((ny, nx))
    b = np.zeros((ny, nx))

    u, v, p = channel_flow(udiff_target, F, u, v, dt, dx, dy, p, b, rho, nu)
    plot_contour(x, y, p, u, v)

    # Analytic steady profile for comparison at the mid-channel column
    y = np.linspace(0, 2, nx)
    a = np.asarray((F * rho * (1 - (y - 1)**2) / (2 * nu)))
    d = a - u[:, int((nx - 1) / 2)]
    globalerror = np.sum(np.abs(d)) / np.sum(np.abs(a))
    return u, v, p, dx, dy, globalerror, a


# %time u, v, p, dx, dy, globalerror, a = chn_sim_run(1)

# %time u, v, p, dx, dy, globalerror,a = chn_sim_run(1e-6)

dx, dy, globalerror

fig = plt.figure(figsize=(11, 7), dpi=100)
plt.plot(u[:, 40], marker='.', lw=0.5, label='computed')
plt.plot(a, marker='x', lw=0.5, label='analytical')
plt.legend()

# Reynolds number from the peak velocity and the channel height (2)
um = np.amax(u)
Re = um * 2 / 0.1
Re
GPU_Channel1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Patchy overview of `rlplay` with REINFORCE

# +
import torch
import numpy

import matplotlib.pyplot as plt
# %matplotlib inline
# -


# This procedure is not related to the package, but it's useful for combining the logged data.
def collate(dicts):
    """Collate identically keyed dicts into one dict of lists.

    Each key maps to the list of its values across the input dicts, in
    input order; keys absent from a given dict are simply skipped for it.
    """
    merged = {}
    # Loop variable renamed from `dict` to avoid shadowing the builtin.
    for record in dicts:
        for k, v in record.items():
            merged.setdefault(k, []).append(v)
    return merged


# <br>

# ## Rollout collection

# Rollout collection is designed to be as much `plug-n-play` as possible, i.e. it
# supports **arbitrarily structured nested containers** of arrays or tensors for
# environment observations and actions. The actor, however, should **expose**
# certain API (described below).

# +
from rlplay.engine import core

# help(core.collect)
# -

# Its role is to serve as a *middle-man* between the **actor-environment** pair
# and the **training loop**: to track the trajectory of the actor in the environment,
# and properly record it into the data buffer.
#
# For example, it is not responsible for seeding or randomization of environments
# (I'm looking at you, `AtariEnv`), and datatype casting (except for rewards,
# which are cast to `fp32` automatically). In theory, there is **no need** for
# special data preprocessing, except for, perhaps, casting data to proper dtypes,
# like from `numpy.float64` observations to `float32` in `CartPole`.
# #### Semantics # The collector just carefully records the trajectory by alternating between # the **REACT** and **STEP+EMIT** phases in the following fashion: # # $$ # \cdots # \longrightarrow t # \overset{\mathrm{REACT}}{\longrightarrow} t + \tfrac12 # \overset{\mathrm{STEP+EMIT}}{\longrightarrow} t + 1 # \longrightarrow \cdots # \,, $$ # # where the half-times $t + \tfrac12$ are commonly referred to as the `afterstates`: # the actor has chosen an action in response to the current observation, yet has # not interacted with the environment. # # So the `time` advances in halves, and the proper names for the half times # in the diagram above are the `state`, the `afterstate` and the `next state`, # respectively. # The collected `fragment` data has the following structure: # * `.state` $z_t$ **the current "extended" observation** # * `.stepno` $n_t$ the step counter # * `.obs` $x_t$ **the current observation** emitted by transitioning to $s_t$ # * `.act` $a_{t-1}$ **the last action** which caused $s_{t-1} \longrightarrow s_t$ in the env # * `.rew` $r_t$ **the previous reward** received by getting to $s_t$ # * `.fin` $d_t$ **the termination flag** indicating if $s_t$ is terminal in the env # # * `.actor` $A_t$ auxiliary data from the actor due to **REACT** # # * `.env` $E_{t+1}$ auxiliary data from the environment due to **STEP+EMIT** # # * `.hx` $h_0$ the starting recurrent state of the actor # # Here $s_t$ denotes **the unobserved true full state** of the environment. 
# The actor $\theta$ interacts with the environment and generates the following # <span style="color:orange">**tracked**</span> data during the rollout, # unobserved/non-tracked data <span style="color:red">**in red**</span> # and $t = 0..T-1$: # # * ${\color{orange}{h_0}}$, the starting recurrent state, is recorded in $\,.\!\mathtt{hx}$ # # * **REACT**: the actor performs the following update ($t \to t + \frac12$) # # $$ # \bigl( # \underbrace{ # .\!\mathtt{state}[\mathtt{t}] # }_{{\color{orange}{z_t}}},\, # {\color{red}{h_t}} # \bigr) # \overset{\text{Actor}_{\theta_{\text{old}}}}{\longrightarrow} # \bigl( # \underbrace{ # .\!\mathtt{state}.\!\mathtt{act}[\mathtt{t+1}] # }_{a_t \leadsto {\color{orange}{z_{t+1}}}},\, # \underbrace{ # .\!\mathtt{actor}[\mathtt{t}] # }_{{\color{orange}{A_t}}},\, # {\color{red}{h_{t+1}}} # \bigr) # \,, $$ # # * **STEP+EMIT**: the environment updates it's unobserved state and emits # the observed data ($t + \frac12 \to t+1_-$) # # $$ # \bigl( # {\color{red}{s_t}},\, # \underbrace{ # .\!\mathtt{state}.\!\mathtt{act}[\mathtt{t+1}] # }_{a_t \leadsto {\color{orange}{z_{t+1}}}} # \bigr) # \overset{\text{Env}}{\longrightarrow} # \bigl( # {\color{red}{s_{t+1}}},\, # \underbrace{ # .\!\mathtt{state}.\!\mathtt{obs}[\mathtt{t+1}] # }_{x_{t+1} \leadsto {\color{orange}{z_{t+1}}}},\, # \underbrace{ # .\!\mathtt{state}.\!\mathtt{rew}[\mathtt{t+1}] # }_{r_{t+1} \leadsto {\color{orange}{z_{t+1}}}},\, # \underbrace{ # .\!\mathtt{state}.\!\mathtt{fin}[\mathtt{t+1}] # }_{d_{t+1} \leadsto {\color{orange}{z_{t+1}}}},\, # \underbrace{ # .\!\mathtt{env}[\mathtt{t}] # }_{{\color{orange}{E_{t+1}}}} # \bigr) # \,, $$ # # * collect loop ($t + 1_- \to t+1$) # # $$ # \bigl( # {\color{orange}{n_t}},\, # {\color{orange}{d_{t+1}}} # \bigr) # \longrightarrow # \underbrace{ # .\!\mathtt{state}.\!\mathtt{stepno}[\mathtt{t+1}] # }_{n_{t+1} \leadsto {\color{orange}{z_{t+1}}}} # \,. 
$$ # # Here $r_t$ is a scalar reward, $d_t = \top$ if $s_t$ is terminal, or $\bot$ # otherwise, $n_{t+1} = 0$ if $d_t = \top$, else $1 + n_t$, and $a \leadsto b$ # means $a$ being recored into $b$. # In general, we may treat $z_t$, the extended observation, as an ordinary # observation, by **suitably modifying** the environment: we can make it # recall the most recent action $a_{t-1}$ and compute the termination indicator # $d_t$ of the current state, and let it keep track of the interaction counter # $n_t$, and, finally, we can configure it to supply the most recent reward # $r_t$ as part of the emitted observation. # # Hence we essentially consider the following POMDP setup: # \begin{align} # a_t, h_{t+1}, A_t # &\longleftarrow \operatorname{Actor}(z_t, h_t; \theta) # \,, \\ # z_{t+1}, r_{t+1}, E_{t+1}, s_{t+1} # &\longleftarrow \operatorname{Env}(s_t, a_t) # \,, \\ # \end{align} # Specifically, let $ # (z_t)_{t=0}^T # = (n_t, x_t, a_{t-1}, r_t, d_t)_{t=0}^T # $ be the trajectory fragment in `.state`, and $h_0$, `.hx`, be the starting # (not necessarily the initial) recurrent state of the actor at the begining # of the rollout. 
# #### Requirements
#
# * all nested containers **must be** built from pure python `dicts`, `lists`, `tuples` or `namedtuples`
#
# * the environment communicates either in **numpy arrays** or in python **scalars**, but not in data types that are incompatible with pytorch (such as `str` or `bytes`)
#
# ```python
# # example
# obs = {
#     'camera': {
#         'rear': numpy.zeros(3, 320, 240),
#         'front': numpy.zeros(3, 320, 240),
#     },
#     'proximity': (+0.1, +0.2, -0.1, +0.0,),
#     'other': {
#         'fuel_tank': 78.5,
#         'passenger': False,
#     },
# }
# ```
#
# * the actor communicates in torch tensors **only**
#
# * the environment produces **float scalar** rewards (other data may be communicated through auxiliary environment info-dicts)

# ### Container support with `.plyr`
#
# One of the core tools used in `rlplay` is a high performing procedure that traverses
# containers of `list`, `dict` and `tuple` and calls the specified function with the
# non-container objects found in the containers as arguments (like `map`, but not an
# iterator, and applicable to arbitrarily structured objects).
#
# See [plyr](https://pypi.org/project/python-plyr/), its `README.md` and
# `plyr.apply` for docs.
#
# The `apply` procedure has slightly faster specialized versions `suply` and `tuply`,
# which do not waste time on validating the structure of the containers.
They differ # in the manner in which they call the specified function: the first passes positional # arguments, while the second passes all arguments in one tuple (think of `map` and # `starmap` from `functools`) # + # appliers of functions to nested objects from plyr import apply, suply, tuply # `setitem` function with argument order, specialized for `apply` from plyr import xgetitem, xsetitem # help(apply) # - # How to use `suply` to reset the recurrent state `hx` returned by `torch.nn.LSTM`: # ```python # # the mask of inputs just after env resets # fin = torch.randint(2, size=(10, 4), dtype=bool) # # # the tensors in `hx` must have the same 2nd dim as `fin` # hx = torch.randn(2, 1, 4, 32, requires_grad=False).unbind() # h0 = torch.zeros(2, 1, 4, 32, requires_grad=True).unbind() # # XXX h0 and hx are tuples of tensors (but we're just as good with dicts) # # # get the masks at step 2, and make it broadcastable with 3d hx # m = ~fin[2].unsqueeze(-1) # reset tensors at fin==False # # # multiply by zero the current `hx` (diff-able reset and grad stop) # suply( # m.mul, # `.mul` method of the mask upcasts from bool to float if necessary # hx, # arg `other` of `.mul` # ) # # # replace the reset batch elments by a diff-able init value # suply( # torch.add, # .add(input, other, *, alpha=1.) # suply(m.mul, hx), # arg `input` of `.add` # suply(r.mul, h0), # arg `other` of `.add` # # alpha=1. 
# pass other `alpha` if we want # ) # # # XXX `torch.where` does not have an `easily` callable interface # suply( # lambda a, b: torch.where(m, a, b), # or `a.where(m, b)` # hx, h0, # ) # ``` # For example, this is used to manually run the recurrent network loop: # ```python # from torch.nn.utils.rnn import pack_padded_sequence # from torch.nn.utils.rnn import pad_packed_sequence # # # if use_cudnn and sticky: # # sequence padding (MUST have sampling with `sticky=True`) # n_steps, n_env, *_ = fin.shape # if n_steps > 1: # # we assume sticky=True # lengths = 1 + (~fin[1:]).sum(0).cpu() # first observation's fin should be ignored # inputs = pack_padded_sequence(input, lengths, enforce_sorted=False) # # output, hx = self.core(inputs, hx) # if n_steps > 1: # output, lens = pad_packed_sequence( # output, batch_first=False, total_length=n_steps) # # else: # # input is T x B x F, hx is either None, or a proper recurrent state # outputs = [] # for x, m in zip(input.unsqueeze(1), ~fin.unsqueeze(-1)): # # `m` indicates if no reset took place, otherwise # # multiply by zero to stop the grads # if hx is not None: # hx = suply(m.mul, hx) # # output, hx = self.core(x, hx) # outputs.append(output) # # output = torch.cat(outputs, dim=0) # ``` # <br> # ### Creating the actors # Rollout collection relies on the following API of the actor: # * `.reset(j, hx)` reset the recurrent state of the j-th environment in the batch (if applicable) # * `hx` contains tensors with shape `(n_lstm_layers * n_dir) x batch x hidden`, or is an empty tuple # * the returned `hx` is the updated recurrent state # # # * `.step(stepno, obs, act, rew, fin, /, *, hx, virtual)` get the next action $a_t$, the recurrent state $h_{t+1}$, and # the **extra info** in response to $n_t$, $x_t$, $a_{t-1}$, $r_t$, $d_t$, and $h_t$ respectively. # * extra info `dict` **might** include `value` key with a `T x B` tensor of state value estimates $ # v_t(z_t) \approx G_t = \mathbb{E} \sum_{j\geq t} \gamma^{j-t} r_{j+1} # $. 
# * MUST allocate new `hx` if the recurrent state is updated # * MUST NOT change the inputs in-place # # + from rlplay.engine import BaseActorModule help(BaseActorModule.reset) # - help(BaseActorModule.step) # `BaseActorModule` is essentially a thin sub-class of `torch.nn.Module`, that implements # the API through `.forward(obs, act, rew, fin, *, hx, stepno)`, which should return three things: # # 1. `actions` prescribed actions in the environment, with data of shape `n_steps x batch x ...` # * can be a nested container of dicts, lists, and tuples # # # 2. `hx` data with shape `n_steps x batch x ...` # * can be a nested container of dicts, lists, and tuples # * **if an actor is not recurrent**, then must return an empty container, e.g. a tuple `()` # # # 3. `info` object, which might be a tensor or a nested object containing data in tensors # `n_steps x batch x ...`. For example, one may communicate the following data: # * `value` -- the state value estimates $v(z_t)$ # * `logits` -- the policy logits $\log \pi(\cdot \mid z_t)$ # * `q` -- $Q(z_t, \cdot)$ values # Here is an example actor, that wraps a simple MLP policy. # + from rlplay.utils.common import multinomial class PolicyWrapper(BaseActorModule): """A non-recurrent policy for a flat `Discrete(n)` action space.""" def __init__(self, policy): super().__init__() self.policy = policy # for updating the exploration epsilon in the clones # self.register_buffer('epsilon', torch.tensor(epsilon)) def forward(self, obs, act=None, rew=None, fin=None, *, hx=None, stepno=None, virtual=False): # Everything is [T x B x ...] logits = self.policy(locals()) actions = multinomial(logits.detach().exp()) return actions, (), dict(logits=logits) # - # <br> # ### Manual rollout collection # We shall need the following procedures from the core of the engine: from rlplay.engine.core import prepare, startup, collect # Manual collection requires an `actor` and a batch of environment instances `envs`. 
# Prepare the run-time context for the specified `actor` and the environments # ```python # # settings # sticky = False # whether to stop interacting if an env resets mid-fragment # device = None # specifies the device to put the actor's inputs and data onto # pinned = False # whether to keep the running context in non-resizable pinned # # (non-paged) memory for faster host-device transfers # # # initialize a buffer for one rollout fragment # buffer = prepare(envs[0], actor, n_steps, len(envs), # pinned=False, device=device) # # # the running context tor the actor and the envs (optionally pinned) # ctx, fragment = startup(envs, actor, buffer, pinned=pinned) # # while not done: # # collect the fragment # collect(envs, actor, fragment, ctx, sticky=sticky, device=device) # # # fragment.pyt -- torch tensors, fragment.npy -- numpy arrays (aliased on-host) # do_stuff(actor, fragment.pyt) # ``` # <br> # ### Rollout collection (same-process) # Collect rollouts within the current process from rlplay.engine.rollout import same # The parameters have the following meaning # ```python # it = same.rollout( # envs, # the batch of environment instances # actor, # the actor which interacts with the batch # n_steps=51, # the length of the rollout fragment # sticky=False, # whether to stop interacting if an env resets mid-fragment # device=None, # specifies the device to put the actor's inputs onto # ) # ``` # `rollout()` returns an iterator, which has, roughly, the same logic, # as the manual collection above. # # Inside the infinite loop it copies `fragment.pyt` onto `device`, before # yielding it to the user. It also does not spawn its own batch of environments, # unlike parallel variants. # The user has to manually limit the number of iterations using, for example, # # ```python # it = same.rollout(...) 
# # for b, batch in zip(range(100), it): # # train on batch # pass # # it.close() # ``` # <br> # ### Rollout collection (single-process) # Single-actor rollout sampler running in a parallel process (double-buffered). from rlplay.engine.rollout import single # Under the hood the functions creates **two** rollout fragment buffers, maintains # a reference to the specified `actor`, makes a shared copy of it (on the host), and # then spawns one worker process. # # The worker, in turn, makes its own local copy of the actor on the specified device, # initializes the environments and the running context. During collection it alternates # between the buffers, into which it records the rollout fragments it collects. Except # for double buffering, the logic is identical to `rollout`. # # The local copies of the actor are **automatically updated** from the maintained reference. # ```python # it = single.rollout( # factory, # the environment factory # actor, # the actor reference, used to update the local actors # # n_steps, # the duration of a rollout fragment # n_envs, # the number of independent environments in the batch # # sticky=False, # do we freeze terminated environments until the end of the rollout? # # required if we wish to leverage cudnn's fast RNN implementations, # # instead of manually stepping through the RNN core. # # clone=True, # should the worker use a local clone of the reference actor # # close=True, # should we `.close()` the environments when cleaning up? # # some envs are very particular about this, e.g. nle # # start_method='fork', # `fork` in notebooks, `spawn` in linux/macos and if we interchange # # cuda tensors between processes (we DO NOT do that: we exchange indices # # to host-shapred tensors) # # device=None, # the device on which to collect rollouts (the local actor is moved # # onto this device) # ) # # # ... 
#
# it.close()
# ```

# <br>

# ### Rollout collection (multi-process)

# A more load-balanced multi-actor multi-process sampler

from rlplay.engine.rollout import multi

# This version of the rollout collector allocates several buffers and spawns
# many parallel workers. Each worker creates its own local copy of the actor,
# instantiates `n_envs` local environments and allocates a running context for
# all of them. The rollout collection in each worker is **hardcoded to run on
# the host device**.

# ```python
# it = multi.rollout(
#     factory,  # the environment factory
#     actor,  # the actor reference, used to update the local actors
#
#     n_steps,  # the duration of each rollout fragment
#
#     n_actors,  # the number of parallel actors
#     n_per_actor,  # the number of independent environments run in each actor
#     n_buffers,  # the size of the pool of buffers, into which rollout
#                 # fragments are collected. Should not be less than `n_actors`.
#     n_per_batch,  # the number of fragments collated into a batch
#
#     sticky=False,  # do we freeze terminated environments until the end of the rollout?
#     # required if we wish to leverage cudnn's fast RNN implementations,
#     # instead of manually stepping through the RNN core.
#
#     pinned=False,
#
#     clone=True,  # should the parallel actors use a local clone of the reference actor
#
#     close=True,  # should we `.close()` the environments when cleaning up?
#     # some envs are very particular about this, e.g. nle
#
#     device=None,  # the device onto which to move the rollout batches
#
#     start_method='fork',  # `fork` in notebooks, `spawn` in linux/macos and if we interchange
#     # cuda tensors between processes (we DO NOT do that: we exchange indices
#     # to host-shared tensors)
# )
#
# # ...
#
# it.close()
# ```

# <br>

# ### Evaluation (same-process)

# In order to evaluate an actor in a batch of environments, one can use `evaluate`.
# +
from rlplay.engine import core

# help(core.evaluate)
# -

# The function *does not* collect the rollout data, except for the rewards.

# Below is the intended use case.

# * **NB** this is run in the same process, hence blocks until completion, which
# might take considerable time (esp. if `n_steps` is unbounded)


# same process
def same_evaluate(
    factory, actor, n_envs=4, *, n_steps=None, close=True, render=False, device=None
):
    """Repeatedly evaluate `actor` and yield the total reward of each run."""
    # stand up the requested number of environment instances
    batch = [factory() for _ in range(n_envs)]
    try:
        while True:
            # one evaluation run: only the reward stream is recorded
            reward_stream, _ = core.evaluate(
                batch, actor, n_steps=n_steps, render=render, device=device)

            # the undiscounted (gamma=1) total of the accumulated rewards
            yield sum(reward_stream)

    finally:
        # tear the environments down when the generator is closed, if requested
        if close:
            for environment in batch:
                environment.close()


# <br>

# ### Evaluation (parallel process)

# Like rollout collection, evaluation can (and probably should) be performed in
# a parallel process, so that it does not burden the main thread with computations
# not related to training.

from rlplay.engine.rollout.evaluate import evaluate

# <br>

# ## CartPole with REINFORCE

# ### the CartPole Environment

# +
import gym

# hotfix for gym's unresponsive viz (spawns gl threads!)
import rlplay.utils.integration.gym
# -

# The environment factory


# +
class FP32Observation(gym.ObservationWrapper):
    """Cast the wrapped env's observations to float32 (torch's default)."""

    def observation(self, observation):
        return observation.astype(numpy.float32)
        # obs[0] = 0.
# mask the position info # return obs # observation.astype(numpy.float32) def factory(seed=None): return FP32Observation(gym.make("CartPole-v0").unwrapped) # - # <br> # ### the algorithms # Service functions for the algorithms # + from plyr import apply, suply, xgetitem def timeshift(state, *, shift=1): """Get current and shfited slices of nested objects.""" # use xgetitem to lett None through # XXX `curr[t]` = (x_t, a_{t-1}, r_t, d_t), t=0..T-H curr = suply(xgetitem, state, index=slice(None, -shift)) # XXX `next[t]` = (x_{t+H}, a_{t+H-1}, r_{t+H}, d_{t+H}), t=0..T-H next = suply(xgetitem, state, index=slice(shift, None)) return curr, next # - # The reinforce PG algo # + from rlplay.algo.returns import pyt_returns # @torch.enable_grad() def reinforce(fragment, module, *, gamma=0.99, C_entropy=1e-2): r"""The REINFORCE algorithm. The basic policy-gradient algorithm with a baseline $b_t$: $$ \nabla_\theta J(s_t) = \mathbb{E}_{a \sim \beta(a\mid s_t)} \frac{\pi(a\mid s_t)}{\beta(a\mid s_t)} \bigl( r_{t+1} + \gamma G_{t+1} - b_t \bigr) \nabla_\theta \log \pi(a\mid s_t) \,. $$ Details ------- It turns out that applying on-policy algo in off-policy setting and expecting it to produce acceptable results was a sure sign of stupidity on part of the author of this notebook. Oh, well... """ # get `.state[t]` and `.state[t+1]` state, state_next = timeshift(fragment.state) # REACT: (state[t], h_t) \to (\hat{a}_t, h_{t+1}, \hat{A}_t) _, _, info = module( state.obs, state.act, state.rew, state.fin, hx=fragment.hx, stepno=state.stepno) # Get the returns-to-go -- the present value of the future rewards # following `state[t]`: G_t = r_{t+1} + \gamma G_{t+1} # XXX bootstrap with the perpetual last reward? # bootstrap = state_next.rew[-1] # torch.tensor(0.) 
# bootstrap = state_next.rew.mean(dim=0) # .div_(1 - gamma) ret = pyt_returns(state_next.rew, state_next.fin, gamma=gamma, bootstrap=torch.tensor(0.)) # `.state_next[t].act` is the action taken in response to `.state[t]` # We assume it is unstructured and categorical. act = state_next.act.unsqueeze(-1) # the policy surrogate score (max) # \frac1T \sum_t (G_t - b_t) \log \pi(a_t \mid s_t) # ret.sub_(ret.mean(dim=0)) # .div_(ret.std(dim=0)) log_pi = info['logits'] # the current policy log_pi_a = log_pi.gather(-1, act).squeeze(-1) reinfscore = log_pi_a.mul(ret).mean() # the log-likelihood # the policy neg-entropy score (min) # - H(\pi(\cdot \mid s)) = - (-1) \sum_a \pi(a\mid s) \log \pi(a\mid s) f_min = torch.finfo(log_pi.dtype).min negentropy = log_pi.exp().mul(log_pi.clamp(min=f_min)).sum(dim=-1).mean() # maximize the entropy and the reinforce score # \ell := - \frac1T \sum_t G_t \log \pi(a_t \mid s_t) # - C \mathbb{H} \pi(\cdot \mid s_t) loss = C_entropy * negentropy - reinfscore return loss.mean(), dict( entropy=-float(negentropy), policy_score=float(reinfscore), ) # - # <br> # ### the Actor # A procedure and a layer, which converts the input integer data into its # little-endian binary representation as float $\{0, 1\}^m$ vectors. 
# +
def onehotbits(input, n_bits=63, dtype=torch.float):
    """Encode integers to fixed-width binary floating point vectors"""
    assert not input.dtype.is_floating_point
    assert 0 < n_bits < 64  # torch.int64 is signed, so at most 63 usable bits

    # bit masks 2^0 .. 2^{n_bits - 1}, moved onto the input's device
    powers = torch.tensor([2 ** bit for bit in range(n_bits)]).to(input.device)

    # broadcast over a new trailing axis to test every bit position at once
    set_bits = input.unsqueeze(-1).bitwise_and(powers).to(bool)

    # upcast the boolean bit mask to the requested floating dtype
    return set_bits.to(dtype)


class OneHotBits(torch.nn.Module):
    """Layer form of `onehotbits`."""

    def __init__(self, n_bits=63, dtype=torch.float):
        assert 1 <= n_bits < 64
        super().__init__()
        self.n_bits = n_bits
        self.dtype = dtype

    def forward(self, input):
        return onehotbits(input, n_bits=self.n_bits, dtype=self.dtype)
# -

# A special module dictionary, which applies itself to the input dict of tensors

# +
from typing import Optional, Mapping

from torch.nn import Module, ModuleDict as BaseModuleDict


class ModuleDict(BaseModuleDict):
    """The ModuleDict, that applies itself to the input dicts."""

    def __init__(
        self,
        modules: Optional[Mapping[str, Module]] = None,
        dim: Optional[int] = -1
    ) -> None:
        super().__init__(modules)
        self.dim = dim

    def forward(self, input):
        # concatenation follows the submodule declaration order from `__init__`
        pieces = [module(input[key]) for key, module in self.items()]
        return torch.cat(pieces, dim=self.dim)
# -

# A policy which uses many inputs.
# + from torch.nn import Sequential from torch.nn import Embedding, Linear, Identity from torch.nn import ReLU, LogSoftmax def policy(): return Sequential( ModuleDict(dict( # stepno=Sequential( # OneHotBits(), Linear(63, 4, bias=False) # ), obs=Identity(), act=Embedding(2, 2), )), Linear(0 + 4 + 2, 32), ReLU(), Linear(32, 2), LogSoftmax(dim=-1), ) # - # The discount factor gamma = 0.99 C_entropy = 0.1 # Initialize the learner and the factories # + from functools import partial factory_eval = partial(factory) learner, sticky = PolicyWrapper(policy()), False learner.train() device_ = torch.device('cpu') # torch.device('cuda:0') learner.to(device=device_) # prepare the optimizer for the learner optim = torch.optim.Adam(learner.parameters(), lr=1e-3) # - # Pick one collector # * the `fork` method is friendlier towards notebooks, but some environments, like the NetHack environment, do not like it # * unlike `fork`, the `spawn` method is `torch.cuda` compatible in that it allows moving on-device tensors between processes. It is not notebook friendly, however :( # * essentially it is better to prototype in notebook with `same.rollout`, then write a submodule non-interactive script with `multi.rollout` # `REINFORCE` and `A2C` methods do not work ~~well~~ in off-policy setting, so we use # the same-process collector, which guarantees on-policy trajectory data. 
T, B = 120, 8  # rollout fragment length and the number of parallel envs

# Initialize the sampler

# generator of rollout batches
# collects on-policy fragments in this very process (see note above)
batchit = same.rollout(
    [factory() for _ in range(B)],
    learner,
    n_steps=T,
    sticky=sticky,
    device=device_,
)

# + active=""
# from rlplay.engine.rollout import episodic
#
# # generator of rollout batches
# batchit = episodic.rollout(
#     [factory() for _ in range(B)],
#     learner,
#     batch_size=8,
#     device=device_,
# )

# + active=""
# # generator of rollout batches
# batchit = single.rollout(
#     factory,
#     learner,
#     n_steps=T,
#     n_envs=B,
#     sticky=sticky,  # so that we can leverage cudnn's fast RNN implementations
#     clone=False,
#     close=False,
#     device=device_,
#     start_method='fork',  # fork in notebook for macos, spawn in linux
# )

# + active=""
# # generator of rollout batches
# batchit = multi.rollout(
#     factory,
#     learner,
#     n_steps=T,
#     n_actors=16,
#     n_per_actor=B,
#     n_buffers=24,
#     n_per_batch=2,
#     sticky=sticky,  # so that we can leverage cudnn's fast RNN implementations
#     pinned=False,
#     clone=True,
#     close=False,
#     device=device_,
#     start_method='fork',  # fork in notebook for macos, spawn in linux
# )
# -

# Generator of evaluation rewards:
# * we're perfectly OK with evaluating in a parallel process

# test_it = test(factory_eval, learner, n_envs=4, n_steps=500, device=device_)
test_it = evaluate(factory_eval, learner, n_envs=4, n_steps=500,
                   clone=False, device=device_, start_method='fork')

# Implement your favorite training method

# + active=""
# torch.autograd.set_detect_anomaly(True)

# +
import tqdm

# from math import log, exp
from torch.nn.utils import clip_grad_norm_

# pytoch loves to hog all threads on some linux systems
torch.set_num_threads(1)

# the training loop: 100 epochs x 40 fragments of REINFORCE updates
losses, rewards, samples = [], [], []
# decay = -log(2) / 25  # exploration epsilon half-life
for epoch in tqdm.tqdm(range(100)):
    for j, batch in zip(range(40), batchit):
        # policy-gradient loss on the freshly collected on-policy fragment
        loss, info = reinforce(batch, learner, gamma=gamma,
                               C_entropy=C_entropy)

        optim.zero_grad()
        loss.backward()
        # clip the gradient to stabilize the updates
        grad_norm = clip_grad_norm_(learner.parameters(), max_norm=1.0)
        optim.step()

        losses.append(dict(
            loss=float(loss),
            grad=float(grad_norm),
            **info
        ))

        # This is an example of how to save a batch: we need to clone,
        #  because the fragment buffer is static, and will be overwritten!
        samples.append(suply(torch.clone, batch))

    # fetch the evaluation results (lag by one inner loop!)
    rewards.append(next(test_it))

    # learner.epsilon.mul_(exp(decay)).clip_(0.1, 1.0)

# +
# stack all samples
samples = tuply(torch.stack, *samples)

# close the generators
batchit.close()
test_it.close()

# + active=""
# import pdb; pdb.pm()
# -

# <br>

# collate the per-step logs into arrays for plotting
data = {k: numpy.array(v) for k, v in collate(losses).items()}

if 'loss' in data:
    plt.plot(data['loss'])

if 'entropy' in data:
    plt.plot(data['entropy'])

if 'policy_score' in data:
    plt.plot(data['policy_score'])

plt.semilogy(data['grad'])

rewards = numpy.stack(rewards, axis=0)

rewards

m, s = numpy.median(rewards, axis=-1), rewards.std(axis=-1)

# +
fi, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(numpy.mean(rewards, axis=-1))
ax.plot(numpy.median(rewards, axis=-1))
ax.plot(numpy.min(rewards, axis=-1))
ax.plot(numpy.std(rewards, axis=-1))
# ax.plot(m+s * 1.96)
# ax.plot(m-s * 1.96)
plt.show()
# -

# <br>

# The ultimate evaluation run

# +
with factory_eval() as env:
    learner.eval()
    eval_rewards, info = core.evaluate([
        env
    ], learner, render=True, n_steps=1e4, device=device_)

    print(sum(eval_rewards))

# + active=""
# import pdb; pdb.pm()
# -

plt.hist(numpy.exp(info['logits']).argmax(-1))

# <br>

# Let's analyze the performance

# +
import math

from scipy.special import softmax, expit, entr

*head, n_actions = info['logits'].shape

proba = softmax(info['logits'], axis=-1)

# per-step policy entropy against the uniform-policy ceiling log(n_actions)
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(entr(proba).sum(-1)[:, 0])
ax.axhline(math.log(n_actions), c='k', alpha=0.5, lw=1);
# -

fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.hist(info['logits'][..., 1] - info['logits'][..., 0], bins=51);  # log-ratio

# <br>

# deliberate stop: the cells below are scratch analysis
assert False

# + active=""
# import pdb; pdb.pm()
# -

# <br>

# stepno = batch.state.stepno
stepno = torch.arange(8192)
with torch.no_grad():
    # NOTE(review): `policy[0]` is the ModuleDict, but its 'stepno' branch is
    #  commented out in `policy()` above, so this lookup would KeyError --
    #  confirm the branch is re-enabled before running this scratch cell.
    out = learner.policy[0]['stepno'](stepno)

# +
fig, axes = plt.subplots(2, 2, figsize=(8, 8), dpi=200,
                         sharex=True, sharey=True)

for j, ax in zip(range(out.shape[1]), axes.flat):
    ax.plot(out[:, j], lw=1)

fig.tight_layout(pad=0, h_pad=0, w_pad=0)
# -

with torch.no_grad():
    plt.imshow(abs(learner.policy[4].weight) @ abs(learner.policy[1].weight))

with torch.no_grad():
    plt.imshow(abs(learner.policy[0]['stepno'][-1].weight)[:, :16].T)

assert False

# <br>
stage/example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Feature-engineering notebook: builds bag-of-words attribute indicators from the
# `feature` field of the Amazon video-games metadata, then reduces them with
# PCA / feature agglomeration.

# + id="wvIfMpbfAISE" colab_type="code" colab={}
import pandas as pd
import itertools
import re
import nltk
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets, cluster
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn import metrics
from sklearn.metrics import plot_confusion_matrix

# + id="qjfBwEirQfz7" colab_type="code"
# NLTK resources: tokenizer models and the English stopword list.
nltk.download('punkt')

# + id="5M5qOxjAVSo-" colab_type="code"
nltk.download('stopwords')

# + id="qQi1PeGOAQna" colab_type="code" colab={}
# Luis' path
prods = pd.read_json("/content/drive/My Drive/MScA/Machine Learning/ML Final Project/Data/meta_Video_Games.json", lines = True)

# + id="-vX8TUx-GWEu" colab_type="code" colab={}
# Sarah's path
# prods = pd.read_json("/content/drive/My Drive/ml_data/meta_Video_Games.json", lines = True)

# + id="GTsZ-n9xFYnW" colab_type="code" colab={}
# One row per product (asin is the product id).
prods.drop_duplicates(subset = "asin", keep = "first", inplace = True)

# + id="QYlCSjSVAVyP" colab_type="code"
prods.shape

# + id="xVfrmiTCJrzs" colab_type="code"
prods.head()

# + id="MZdM_Li5Ffvg" colab_type="code"
sum(prods.feature.isna())

# + id="xW9HOKziF5NZ" colab_type="code" colab={}
# BUG FIX: the original assigned `features = prods.dropna(subset=['feature'])`
# and then immediately re-assigned `features = prods.dropna(subset=['category'])`,
# silently discarding the first filter. Chain both filters, and take a copy so
# the column assignments below operate on an independent frame rather than a
# view of `prods` (avoids SettingWithCopy behavior).
features = prods.dropna(subset=['feature']).dropna(subset=['category']).copy()

# + id="C1XLfcgQpSnr" colab_type="code"
# BUG FIX: the original used `[i.remove("Video Games") for i in features['category']]`,
# a side-effecting comprehension that (a) mutates list objects shared with
# `prods` and (b) raises ValueError on any row whose category list lacks the
# label. Build fresh filtered lists instead.
features['category'] = [[c for c in cats if c != "Video Games"]
                        for cats in features['category']]

# + id="JBT6UFuyqbe6" colab_type="code"
features['category']

# + id="W81HRfBeeJe8" colab_type="code"
# Flag rows whose remaining categories still mention "Games".
features["game_cat"] = ["Games" in i for i in features['category']]

# + id="rU7voWEQq0Of" colab_type="code"
# (Original printed every flag with `[print(i) for i in features['game_cat']]`;
# dropped — thousands of lines of output with no analytical value.)

# + id="l0WFF4dLedNP" colab_type="code" colab={}
# Keep only games (`features.game_cat != False` replaced by the boolean mask).
features = features[features.game_cat]

# + id="3xZO10oCGCYj" colab_type="code"
features.head()

# + id="r6c7uEylIsjw" colab_type="code" colab={}
features.dropna(subset=["feature"], inplace=True)

# + id="wSTdFn3MIuj5" colab_type="code" colab={}
# from the feature column, join the features into one big string
feat_list = [' '.join(r) for r in features.feature]

# + id="rH3X_yhwNNG6" colab_type="code"
feat_list[0:10]

# + id="JHVtADbV7XLT" colab_type="code"
len(np.array(feat_list))

# + id="6nFLnW9b7YsT" colab_type="code"
len(features.feature)

# + id="aVP65I97NECw" colab_type="code" colab={}
# BUG FIX: the original round-tripped `feat_list` through a fresh DataFrame
# (RangeIndex 0..n-1) and assigned `features['feat_list'] = fdf['feat_list']`
# BEFORE `reset_index`. Pandas aligns Series assignment on index labels, so any
# row whose original `prods` index label was missing from the RangeIndex got
# NaN. Assigning the plain Python list is positional and therefore safe.
features['feat_list'] = feat_list

# + id="Ap3KTMQt7wDj" colab_type="code"
features.head()

# + id="VgZ3t5HKToMu" colab_type="code" colab={}
features.reset_index(inplace=True)

# + id="LYSR9KSA8mQQ" colab_type="code"
features.head()

# + id="iBrr8r4qFOhW" colab_type="code"
features["features_joined"] = features.feature.str.join(" ")
features["features_joined"]

# + id="CX30BEBJ8Oeo" colab_type="code" colab={}
features.to_csv("/content/drive/My Drive/MScA/Machine Learning/ML Final Project/Data/features_clean.csv")

# + id="-r-R0H33MFF2" colab_type="code" colab={}
# pull out 10 of 50 most common words for categorizing

# + id="qFX7y9TO9yI7" colab_type="code" colab={}
features.dropna(subset = ['feat_list'], inplace = True)

# + id="bmLZL4rHCfhk" colab_type="code"
len(features.feat_list)

# + id="a-z5B_ykPfSM" colab_type="code" colab={}
# Tokenize each joined feature string into a flat token list. Replaces the
# original iterrows()/iloc loop, which reused the name `i` for both the row
# index and the word being tokenized and relied on the index matching the
# positional order.
features['feat_list'] = [
    [tok for word in str(s).split(' ') for tok in nltk.tokenize.word_tokenize(word)]
    for s in features['feat_list']
]

# + id="GeI-PKokQxUO" colab_type="code" colab={}
# Clean the token lists: drop 1-char tokens, non-alphabetic tokens, stopwords.
features_clean = features['feat_list']
features_clean = features_clean.apply(lambda x: [i for i in x if len(i) > 1])
features_clean = features_clean.apply(lambda x: [i for i in x if i.isalpha()])
stopwords = set(nltk.corpus.stopwords.words('english'))
features_clean = features_clean.apply(lambda x: [i for i in x if i not in stopwords])

# + id="lNlTMuyMQATS" colab_type="code"
len(features)

# + id="DIB9ieF8VYEb" colab_type="code"
# Corpus-wide token frequency distribution.
f_clean = list(itertools.chain.from_iterable(features_clean))
fdist = nltk.FreqDist(f_clean)
fdist.most_common(500)

# + id="LhHDZEesMoJN" colab_type="code" colab={}
common = fdist.most_common(500)

# + id="DNAtXS6UJn8O" colab_type="code" colab={}
# One indicator column per common token.
attr500 = {k[0].lower(): [] for k in common}

# + id="ypVbMtU7Qyuz" colab_type="code"
len(attr500.keys())

# + id="Bhlmv-fOaOJj" colab_type="code" colab={}
# Mark 1 when a row's token list contains the key (or the key is the plural of
# one of its tokens), else 0. Sets make each membership test O(1) instead of
# scanning the token list once per one of the 500 keys.
for r in features.feat_list:
    tokens = {t.lower() for t in r}
    plurals = {t + 's' for t in tokens}
    for k in attr500:
        attr500[k].append(1 if (k in tokens) or (k in plurals) else 0)

# + id="xlCqBJs8a4-n" colab_type="code" colab={}
attr_df = pd.DataFrame(attr500)

# + id="VNrnJR42RaLo" colab_type="code" colab={}
# run PCA on attr_df
pca = PCA(50)

# + id="w1jtiJvWR5K_" colab_type="code"
pca.fit(attr_df)

# + id="fT252wOgR_FM" colab_type="code"
print(sum(pca.explained_variance_ratio_))

# + id="qvf59inCSBFE" colab_type="code" colab={}
agglo = cluster.FeatureAgglomeration(n_clusters=8)

# + id="UZKBnA-cTCZ_" colab_type="code"
agglo.fit(attr_df)

# + id="N8kVj69gTEUh" colab_type="code" colab={}
attr_reduced = agglo.transform(attr_df)

# + id="CC3F0p-ITJ1E" colab_type="code"
pd.DataFrame(attr_reduced)

# + id="hthTu-8ETiot" colab_type="code" colab={}
Analysis/Content-Based/Feature_NLP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Intro to profiling

# Python's dirty little secret is that it can be made to run pretty fast.
#
# The bare-metal HPC people will be angrily tweeting at me now, or rather, they would be if they could get their wireless drivers working.
#
# Still, there are some things you *really* don't want to do in Python. Nested loops are usually a bad idea. But often you won't know where your code is slowing down just by looking at it and trying to accelerate everything can be a waste of time. (Developer time, that is, both now and in the future: you incur technical debt if you unintentionally obfuscate code to make it faster when it doesn't need to be).
#
# The first step is always to find the bottlenecks in your code, via _profiling_: analyzing your code by measuring the execution time of its parts.

# Tools
# -----
#
# 2. `cProfile`
# 1. [`line_profiler`](https://github.com/rkern/line_profiler)
# 3. `timeit`
#
# **Note**:
# If you haven't already installed it, you can do
#
# ```console
# conda install line_profiler
# ```
#
# or
#
# ```console
# pip install line_profiler
# ```

# ## Some bad code
#
# Here's a bit of code guaranteed to perform poorly: it sleeps for 1.5 seconds after doing any work! We will profile it and see where we might be able to help.

# +
import numpy
from time import sleep


def bad_call(dude):
    # Deliberately slow helper: blocks for half a second regardless of input.
    sleep(.5)


def worse_call(dude):
    # Deliberately slower helper: blocks for a full second.
    sleep(1)


def sumulate(foo):
    # Demo workload for the profilers: a 1000x1000 matrix product (result
    # discarded) plus a Python-level summation loop, followed by the two
    # sleepy helpers above. Non-int `foo` makes it bail out early,
    # implicitly returning None; otherwise returns sum(range(foo)).
    if not isinstance(foo, int):
        return
    a = numpy.random.random((1000, 1000))
    a @ a
    ans = 0
    for i in range(foo):
        ans += i
    bad_call(ans)
    worse_call(ans)
    return ans
# -

sumulate(150)

# ## using `cProfile`
# [`cProfile`](https://docs.python.org/3.4/library/profile.html#module-cProfile) is the built-in profiler in Python (available since Python 2.5). It provides a function-by-function report of execution time. First import the module, then usage is simply a call to `cProfile.run()` with your code as argument. It will print out a list of all the functions that were called, with the number of calls and the time spent in each.

import cProfile
cProfile.run('sumulate(150)')

# You can see here that when our code `sumulate()` executes, it spends almost all its time in the method `time.sleep` (a bit over 1.5 seconds).
#
# If your program is more complicated than this cute demo, you'll have a hard time parsing the long output of `cProfile`. In that case, you may want a profiling visualization tool, like [SnakeViz](https://jiffyclub.github.io/snakeviz/). But that is outside the scope of this tutorial.

# ## using `line_profiler`

# `line_profiler` offers more granular information than `cProfile`: it will give timing information about each line of code in a profiled function.

# Load the `line_profiler` extension

# %load_ext line_profiler

# ### For a pop-up window with results in notebook:

# IPython has an `%lprun` magic to profile specific functions within an executed statement. Usage:
# `%lprun -f func_to_profile <statement>` (get more help by running `%lprun?` in IPython).

# ### Profiling two functions

# %lprun -f bad_call -f worse_call sumulate(13)

# ### Write results to a text file

# %lprun -T timings.txt -f sumulate sumulate(12)

# +
# # %load timings.txt
# -

# ## Profiling on the command line

# Open file, add `@profile` decorator to any function you want to profile, then run
#
# ```console
# kernprof -l script_to_profile.py
# ```
#
# which will generate `script_to_profile.py.lprof` (pickled result). To view the results, run
#
# ```console
# python -m line_profiler script_to_profile.py.lprof
# ```

from IPython.display import IFrame
IFrame('http://localhost:7000/terminals/1', width=800, height=700)

# ## `timeit`

# `timeit` is not perfect, but it is helpful.
#
# Potential concerns re: `timeit`
#
# * Only runs benchmark 3 times
# * It disables garbage collection
#
# ```python
# python -m timeit -r 25 "print(42)"
# ```
#
# ```python
# python -m timeit -s "gc.enable()" "print(42)"
# ```
#
# Victor Stinner has a module, `perf`, which is more robust and addresses these concerns. You can check it out at: https://perf.readthedocs.io/en/latest/user_guide.html

# ### Line magic

# %timeit x = 5

# ### Cell magic

# %%timeit
x = 5
y = 6
x + y

# The `-q` flag quiets output. The `-o` flag allows outputting results to a variable. The `-q` flag sometimes disagrees with OSX so please remove it if you're having issues.

# a = %timeit -qo x = 5
notebooks/01.When.where.to.use.Numba.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + deletable=true editable=true
import algolab
import sys
#sys.path.append('../exams/2017-02-16-lab/solutions')
sys.path.append('past-exams/2017-02-16/solutions')
import nxpd
algolab.init()

# + [markdown] deletable=true editable=true
# <center>
# <span class="algolab-title"> Algolab Exam</span><br/><br/>
# <span style="font-size:20px"> Scientific Programming Module 2</span> <br/>
# <span style="font-size:20px"> Algorithms and Data Structures </span> <br/><br/>
# <span> Thursday 16th, Feb 2017</span><br/><br/>
#
#
# </center>

# + [markdown] deletable=true editable=true
# # Introduction
#
# * Taking part in this exam erases any vote you had before, both lab and theory
# * If you don't ship or you don't pass this lab part, you also lose the theory part.
#
#
# * Log into your computer in _exam mode_, it should start Ubuntu
# * To edit the files, you can use any editor of your choice: _Editra_ seems easy to use, you can find it under _Applications->Programming->Editra_. Others could be _GEdit_ (simpler), or _PyCharm_ (more complex).
#
#
#
# ## Allowed material
#
# There won't be any internet access.
You will only be able to access: # # * <a href="index.html" target="_blank">Sciprog Algolab worksheets</a> # * <a href="../montresor/Montresor%20sciprog/cricca.disi.unitn.it/montresor/teaching/scientific-programming/slides/index.html">Alberto Montresor slides</a> # * <a href="../teso/disi.unitn.it/_teso/courses/sciprog/index.html" target="_blank">Stefano Teso docs</a> # * Python 2.7 documentation :&nbsp;&nbsp; <a href="../python-docs/html/index.html" target="_blank">html</a> # &nbsp;&nbsp;<a href="../python-docs/pdf" target="_blank">pdf</a> # * In particular, <a href="../python-docs/html/library/unittest.html" target="_blank">Unittest docs</a> # * The course book _Problem Solving with Algorithms and Data Structures using Python_ &nbsp;&nbsp;<a href="../pythonds/index.html" target="_blank">html</a> &nbsp;&nbsp;&nbsp;<a href="../pythonds/ProblemSolvingwithAlgorithmsandDataStructures.pdf" target="_blank">pdf</a> # # # ## Grading # # # * The grade of this lab part will range from 0 to 30. Total grade for the module will be given by the average with the theory part of Alberto Montresor. # * Correct implementations with the required complexity grant you full grade. # * Partial implementations _might_ still give you a few points. If you just can't solve an exercise, try to solve it at least for some subcase (i.e. array of fixed size 2) commenting why you did so. # * One bonus point can be earned by writing stylish code. You got style if you: # # - do not infringe the [Commandments](../algolab/index.html#Commandments) # - write [pythonic code](http://docs.python-guide.org/en/latest/writing/style) # - avoid convoluted code like i.e. # # ``` # if x > 5: # return True # else: # return False # ``` # # when you could write just # # ``` # return x > 5 # ``` # # + deletable=true editable=true # %%HTML <p class="algolab-warn"> !!!!!!!!! WARNING !!!!!!!!! <br/> <br/> !!!!!!!!! **ONLY** IMPLEMENTATIONS OF THE PROVIDED FUNCTION SIGNATURES WILL BE EVALUATED !!!!!!!!! 
<br/> </p> # + [markdown] deletable=true editable=true # # For example, if you are given to implement: # # ```python # def cool_fun(x): # raise Exception("TODO implement me") # ``` # # and you ship this code: # # ``` python # def cool_fun_non_working_trial(x): # # do some absurdity # # def cool_fun_a_perfectly_working_trial(x): # # a super fast, correct and stylish implementation # # def cool_fun(x): # raise Exception("TODO implement me") # ``` # # We will assess only the latter one `cool_fun(x)`, and conclude it doesn't work at all :P !!!!!!! # # Still, you are allowed to define any extra helper function you might need. If your `cool_fun(x)` implementation calls some other function you defined like `my_helper` here, it is ok: # # ```python # # def my_helper(y,z): # # do something useful # # def cool_fun(x): # my_helper(x,5) # # # this will get ignored: # def some_trial(x): # # do some absurdity # # ``` # # + [markdown] deletable=true editable=true # ## What to do # # # In <a href="/usr/local/esame" target="_blank">/usr/local/esame</a> you should find a file named `algolab-17-01-26.zip`. Download it and extract it on your desktop. The content should be like this: # # ``` # algolab-17-01-26 # |- FIRSTNAME-LASTNAME-ID # |- exercise1-slow.py # |- exercise1-fast.py # |- exercise2.py # |- exercise3.py # # ``` # # 2) Check this folder also shows under `/var/exam`. # # 3) Rename `FIRSTNAME-LASTNAME-ID` folder: put your name, lastname an id number, like `john-doe-432432` # # From now on, you will be editing the files in that folder. At the end of the exam, that is what will be evaluated. # # 4) Edit the files following the instructions in this worksheet for each exercise. # # # + deletable=true editable=true # %%HTML <p class="algolab-warn"> WARNING: <i>DON'T</i> modify function signatures! Just provide the implementation. </p> <p class="algolab-warn"> WARNING: <i>DON'T</i> change the existing test methods, just add new ones !!! You can add as many as you want. 
</p> <p class="algolab-warn"> WARNING: <i>DON'T</i> create other files. If you still do it, they won't be evaluated. </p> <p class="algolab-important"> IMPORTANT: Pay close attention to the comments of the functions. </p> <p class="algolab-important"> IMPORTANT: if you need to print some debugging information, you <i>are allowed</i> to put extra <code>print</code> statements in the function bodies. </p> <p class="algolab-warn"> WARNING: even if <code>print</code> statements are allowed, be careful with prints that might break your function, i.e. avoid stuff like this: <code> print 1/0 </code> </p> # + [markdown] deletable=true editable=true # 3) Every exercise should take max 25 mins. If it takes longer, leave it and try another exercise. # # + deletable=true editable=true # %%HTML <p class="algolab-warn"> WARNING: MAKE SURE ALL EXERCISE FILES AT LEAST COMPILE !!! <br/> 10 MINS BEFORE THE END OF THE EXAM I WILL ASK YOU TO DO A FINAL CLEAN UP OF THE CODE </p> # + [markdown] deletable=true editable=true # # 1) BoolStack # # # You are given a class `BoolStack` that models a simple stack. 
This stack is similar to the `CappedStack` you already saw in class, the only differences being: # # - it can only contain booleans, trying to put other type of values will raise a `ValueError` # - trying to `pop` or `peek` an empty stack will raise an `IndexError` # - there is no cap # + deletable=true editable=true from exercise1_slow_solution import * # + [markdown] deletable=true editable=true # To create a `BoolStack`, just call it: # + deletable=true editable=true bs = BoolStack() print bs # + deletable=true editable=true bs.push(True) # + deletable=true editable=true print bs # + deletable=true editable=true bs.push(False) # + deletable=true editable=true print bs # + deletable=true editable=true print bs.pop() # + deletable=true editable=true print bs # + deletable=true editable=true print bs.pop() # + deletable=true editable=true print bs # + [markdown] deletable=true editable=true # ## 1.0) test `BoolStack` # # Now start editing the file `exercise1_slow.py`. To check your environment is working fine, try to run the tests for `BoolStackTest`, which contain tests for the already implemented methods `pop`, `push`, etc ... # # **Notice that `exercise1_slow` is followed by a dot and test class name: `.BoolStackTest` ** # # ```bash # # python -m unittest exercise1_slow.BoolStackTest # # ``` # + deletable=true editable=true algolab.run(BoolStackTest) # + [markdown] deletable=true editable=true # ## 1.1) `true_count`, slow version # # Implement the `true_count` method inside the class, **just working on this method alone**: # # ```python # # def true_count(self): # """ Return the number of elements which are True in O(n), where n is the size of stack. """ # # raise Exception("TODO IMPLEMENT ME !") # # ``` # # **Testing** # # Once done, running this will run only the tests in `TrueCountTest` class and hopefully they will pass. 
# # **Notice that `exercise1_slow` is followed by a dot and test class name `.TrueCountTest` : ** # # ```bash # # python -m unittest exercise1_slow.TrueCountTest # # ``` # # + deletable=true editable=true algolab.run(TrueCountTest) # + [markdown] deletable=true editable=true # # # ## 1.2) `true_count`, fast version # # Now start editing the file `exercise1_fast.py`: inside you will find the class `FastBoolStack`. Your goal now is to implement a `true_count` method that works in `O(1)`. To make this possible, you are allowed to add any field you want in the constructor and you can also change any other method you deem necessary (like `push`) . # # # ```python # def true_count(self): # """ Return the number of elements which are True # # *** MUST EXECUTE IN O(1) *** # """ # raise Exception("TODO IMPLEMENT ME !") # ``` # # # ** Testing **: # + deletable=true editable=true # %%HTML <p class="algolab-warn"> WARNING: Since you are going to modify the whole class, make sure tests pass BOTH for <code>FastBoolStackTest</code> AND <code>TrueCountTest</code> ! </p> # + [markdown] deletable=true editable=true # # **Tests for `push`, `pop`, etc**: # # ` python -m unittest exercise1_fast.FastBoolStackTest` # # **Tests just for `true_count`**: # # ` python -m unittest exercise1_fast.TrueCountTest` # # + deletable=true editable=true from exercise1_fast_solution import * # + deletable=true editable=true algolab.run(FastBoolStackTest) # + deletable=true editable=true algolab.run(TrueCountTest) # + [markdown] deletable=true editable=true # # 2) UnorderedList # # Start editing file `exercise2.py`, which contains a simplified versioned of the `UnorderedList` we saw in the labs. # # + deletable=true editable=true from exercise2_solution import * # + [markdown] deletable=true editable=true # # ## 2.1) `dup_first` # # Implement the method `dup_first`: # # ```python # def dup_first(self): # """ Modifies this list by adding a duplicate of first node right after it. 
# # For example, the list 'a','b','c' should become 'a','a','b','c'. # An empty list remains unmodified. # # ** DOES NOT RETURN ANYTHING !!! ** # # """ # # raise Exception("TODO IMPLEMENT ME !") # ``` # # **Testing:** `python -m unittest exercise2.DupFirstTest` # + deletable=true editable=true algolab.run(DupFirstTest) # + [markdown] deletable=true editable=true # ## 2.2) `dup_all` # # Implement the method `dup_all`: # # ```python # def dup_all(self): # """ Modifies this list by adding a duplicate of each node right after it. # # For example, the list 'a','b','c' should become 'a','a','b','b','c','c'. # An empty list remains unmodified. # # ** MUST PERFORM IN O(n) WHERE n is the length of the list. ** # # ** DOES NOT RETURN ANYTHING !!! ** # """ # # raise Exception("TODO IMPLEMENT ME !") # ``` # # **Testing:** `python -m unittest exercise2.DupAllTest` # # + deletable=true editable=true algolab.run(DupAllTest) # + [markdown] deletable=true editable=true # # 3) DiGraph # # Now you are going to build some `DiGraph`, by defining functions _external_ to class `DiGraph`. # + deletable=true editable=true # %%HTML <p class="algolab-warn" target="_blank"> WARNING: To build the graphs, just use the methods you find inside <code>DiGraph</code> class, like <code>add_vertex</code>, <code>add_edge</code>, etc. </p> # + [markdown] deletable=true editable=true # Start editing file `exercise3.py` # + deletable=true editable=true from exercise3_solution import * # + [markdown] deletable=true editable=true # ## 3.1) pie # # + [markdown] deletable=true editable=true # # Implement the function `pie`. Note the function is defined *outside* `DiGraph` class. # # # ```python # """ # Returns a DiGraph with n+1 verteces, displaced like a polygon with a perimeter # of n verteces progressively numbered from 1 to n. # A central vertex numbered zero has outgoing edges to all other verteces. # # For n = 0, return the empty graph. 
# For n = 1, return vertex zero connected to node 1, and node 1 has a self-loop. # # """ # raise Exception("TODO IMPLEMENT ME !") # ``` # # ** Testing: ** `python -m unittest exercise3.PieTest` # # # + deletable=true editable=true algolab.run(DiGraphTest) # + deletable=true editable=true algolab.run(PieTest) # + [markdown] deletable=true editable=true # ** Example usage **: # # For _n=5_, the function creates this graph: # # + deletable=true editable=true print pie(5) # + [markdown] deletable=true editable=true # <img src="img/pie.svg" width="250px"> # + [markdown] deletable=true editable=true # **Degenerate cases**: # + deletable=true editable=true print pie(0) # + deletable=true editable=true print pie(1) # + deletable=true editable=true nxpd.draw(algolab.to_nx(pie(1)), show='ipynb') # + [markdown] deletable=true editable=true # ## 3.2) Flux Capacitor # # A _Flux Capacitor_ is a plutonium-powered device that enables time travelling. During the 80s it was installed on a Delorean car and successfully used to ride humans back and forth across centuries: # # <img src="img/flux-capacitor.jpg" width="200px"> # # In this exercise you will build a Flux Capacitor model as a Y-shaped `DiGraph`, created according to a parameter `depth`. Here you see examples at different depths: # # + [markdown] deletable=true editable=true # <img src="img/flux-capacitor.svg" width="700px"> # + [markdown] deletable=true editable=true # Implement the function `flux`. Note the function is defined *outside* `DiGraph` class: # # ```python # def flux(depth): # """ Returns a DiGraph with 1 + (d * 3) numbered verteces displaced like a Flux Capacitor: # # - from a central node numbered 0, three branches depart # - all edges are directed outward # - on each branch there are 'depth' verteces. 
# # For example, for depth=2 we get the following graph (suppose arrows point outward): # # 4 5 # \ / # 1 2 # \ / # 0 # | # 3 # | # 6 # # """ # raise Exception("TODO IMPLEMENT ME !") # ``` # # # + [markdown] deletable=true editable=true # **Testing**: `python -m unittest exercise3.FluxTest` # + deletable=true editable=true algolab.run(FluxTest) # + [markdown] deletable=true editable=true # **Usage examples** # + deletable=true editable=true print flux(0) # + deletable=true editable=true print flux(1) # + deletable=true editable=true print flux(2) # + deletable=true editable=true print flux(3) # + [markdown] deletable=true editable=true # #
exam-2017-02-16.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Loss Functions # # When fitting data to labels we need to measure the degree of goodness of fit. This sounds obvious but isn't quite so straightforward. In fact, there are entire fields of statistics that focus solely on that (e.g. robust statistics). In this notebook we'll discuss a number of ways how we can measure whether our model is doing well. As a side-benefit, we'll get to know the loss function layers in ``gluon``. We begin with our default import ritual. import mxnet as mx import mxnet.gluon as gluon from mxnet import nd, autograd import matplotlib.pyplot as plt import numpy as np import mxnet.autograd as ag import math mx.random.seed(1) # # Regression # # ## L1 loss # # As we discussed in the introduction, regression describes the cases where we want to estimate some real valued number $f(x) \in \mathbb{R}$ to match an observation $y$. A natural idea of measuring the distance would be to compute $|y - f(x)|$. This makes sense, e.g. if we need to estimate how much it might cost to manufacture a product: if we estimate too low, we will incur a loss due to underestimation. If we overprice it, we will sell fewer products (here we're making the unrealistic assumption that both are equally bad). In math, the loss function is # # $$l(y,f) = |y-f|$$ # # Let's compute it with ``gluon`` and also its gradient. 
# + loss = gluon.loss.L1Loss() # getting data ready output = nd.arange(-5,5,0.01) output.attach_grad() # we need the gradient thelabel = nd.zeros_like(output) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='L1 loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of L1 loss') plt.legend() plt.show() # - # Before we move on to other losses, let's quickly consider what happens if we want to minimize the L1 loss. Consider the toy example where we have a number of labels $y_i$ and we want to fit *all* of them to a single scalar, say $f$. In this case we need to solve the minimization problem: # # $$\mathop{\mathrm{minimize}}_f \sum_i |y_i - f|$$ # # As we saw above, the gradient is either -1 or 1. Hence, for the gradients to the left and to the right of $f$ to cancel out we need *the same number of $y_i$* on either side. This is the definition of the *median*. Hence, minimizing the L1 loss means that we are computing the median (at least for constant predictions). In general, the L1 loss is very robust against outliers, since the gradients can never get too large. # # ## L2 loss # # Taking the squared distance between observation and estimate tends to be the default choice in many problems. Often for convenience we multiply this loss by a factor of $\frac{1}{2}$ to ensure that the derivatives look pretty. Here's the loss: # # $$l(y,f) = \frac{1}{2} (y-f)^2$$ # # For vectorial $f$ and $y$ this is the squared Euclidean distance between points. The L2 loss has a few other nice properties. By a similar argument as before we can see that $\sum_{i=1}^m \frac{1}{2} (y_i - f)^2$ is minimized by choosing $f = \frac{1}{m} \sum_{i=1}^m y_i$, i.e. by choosing the mean. Let's see what it looks like in practice. 
# + loss = gluon.loss.L2Loss() with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='L2 loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of L2 loss') plt.legend() plt.show() # - # ## Huber's Robust loss # # Huber's Robust Loss is a cross between the L1 and the L2 loss. It behaves like an L2 loss close to zero. Beyond that, for discrepancies larger than $\rho$ it behaves like an L1 loss. The scaling is set up in such a way as to ensure that the derivative is continuous. # # $$l(y,f) = \begin{cases} # \frac{1}{2 \rho} (y-f)^2 & \text{ for } |y-f| < \rho \\ # |y-f| - \frac{\rho}{2} & \text{ otherwise} # \end{cases}$$ # # If we minimize the loss something interesting happens (again, we're in the toy scenario that we just estimate a scalar). The number of cases with $y_i < f-\rho$ and with $y_i > f+\rho$ are going to cancel out, since their gradients are all $-1$ and $1$ respectively. For all the $y_i$ closer to $f$, the gradients will balance out like in the L2 loss case. In other words, $f$ will be the mean for all points closer than $\rho$. This is pretty much what a *trimmed mean* estimator does. It ensures that a few outliers (very large $|y_i|$) won't break the estimate. Let's check it out in practice. 
# + loss = gluon.loss.Huber(rho=0.5) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Huber loss 0.5') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Huber loss 0.5') # and now for the same loss function with rho=1.0, the default loss = gluon.loss.Huber() with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Huber loss 1') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Huber loss 1') plt.legend() plt.show() # - # ## Quantile Regression # # In most cases we want to find an output $y$ which is in some way maximal for a given $x$, e.g. the one with the smallest amount of variance, the most likely one, etc. But there are cases where this isn't quite the most desirable thing: imagine that we want to build a tool for physicians to assess whether a child is of normal height. *normal* is obviously relative - relative to age, gender, the ethnic background of the parents, etc. While a good physician might have a good intuition, it would be great if we could quantify this. That is exactly what *quantile regression* does. It aims to estimate some output $f(x)$ such that $\Pr(y \leq f(x)|x) = \tau$ for some quantile $\tau$. This allows us to trace quantile curves for all sorts of probabilities, such as the table below computed by the CDC. # # ![](img/growth-2-20-girls.png) # # To calculate such a table we can use a skewed loss function. Statisticians often call it a 'pinball loss', since it looks like the levers on a pinball machine. Basically it's an L1 loss that has been tilted to one side or another. 
# # $$l(y,f) = \begin{cases} # \tau (y-f) & \text{ for } f<y \\ # (1-\tau) (f-y) & \text{ otherwise} # \end{cases}$$ # # Depending on how far we tilt this loss, we end up with a loss function that underweights (small $\tau$) or overweights (large $\tau$) errors on the left or on the right. # + loss = gluon.loss.Quantile(tau=0.2) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Quantile loss 0.2') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Quantile loss 0.2') # and now for the same loss function with tau = 0.6 loss = gluon.loss.Quantile(tau=0.6) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Quantile loss 0.6') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Quantile loss 0.6') plt.legend() plt.show() # - # ## $\epsilon$ Insensitive Loss # # In some cases we do not care about small deviations from the truth. More to the point, we do not care about deviations up to $\epsilon$. Beyond that, we might care in a linear fashion. For instance, a screw might have a tolerance of $\epsilon$ and the work to make anything fit beyond that would be linear in the diameter of the screw (yes, it's a contrived example). The associated loss function (described in detail by a paper by Vapnik, Golovich and Smola, 1995) is given by: # # $$l(y,f) = \mathrm{max}(0, |y-f| - \epsilon)$$ # # As you can see, it contains a region $[y-\epsilon, y+\epsilon]$ where the derivative vanishes. Outside that range it is constant. 
# + loss = gluon.loss.EpsilonInsensitive(epsilon=0.5) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Epsilon-insensitive loss 0.5') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Epsilon-insensitive loss 0.5') plt.legend() plt.show() # - # ## LogCosh Loss # # An obscure variant among loss functions is the LogCosh loss. The key idea is to smooth out the L1 loss such that the loss becomes continuously differentiable even at $0$. This is accomplished by computing the softmax between $y-f$ and $f-y$, i.e. to compute $\log \cosh (y-f)$. The results are exactly as expected. Note that to compute it, we use a numerically stable variant $\log \cosh x = |x| + \log (1+ \exp(-2x))/2$. This ensures that large values of $x$ do not lead to divergent terms. # + loss = gluon.loss.LogCosh() with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='LogCosh loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of LogCosh loss') plt.legend() plt.show() # - # ## Poisson # # In some cases the regression problem does not have to deal with continuous values that could be both positive or negative, but rather with *integer counts*. For instance, the number of rain drops per square meter in a given time, the number of meteorites hitting Antarctica per day, the number of Prussian soldiers that were hit by horses per week, etc. can be useful numbers to estimate. However, it is equally clear that a real valued estimate is useless: we never have 1.3 meteorites. It's only $0, 1, 2, 3, \ldots$ or some other number. Consequently, we need a different loss function. Fortunately the Poisson distribution fits the bill quite well. 
In it, we assume that # # $$p(y|f) = \frac{1}{y!} \exp(y f - \exp(f)) \text{ and } l(y,f) = - \log p(y|f).$$ # # In many cases one uses an equivalent formulation with rate parameter $\lambda = \exp(f)$ such that we get # $p(y|\lambda) = \frac{1}{y!} \lambda^y e^{-\lambda}$. Note that this is entirely equivalent. The only problem with the $\lambda$-parametrization is that $\lambda$ must be non-negative, whereas $f$ can assume arbitrary values. **Unlike Keras and PyTorch, Gluon uses the exponential formulation**. # # By design, the loss function vanishes for $y = \exp(f)$, as can be seen in the graph below (this is one of the reasons why sometimes the $\lambda$ parametrization is preferable). # + loss = gluon.loss.Poisson() with ag.record(): # start recording theloss = loss(output, 10 * nd.ones_like(output)) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Poisson loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of Poisson loss') plt.legend() plt.show() # + # this implements an L2 norm triplet loss # max(margin + |f1 - f2|^2 - |f1-f3|^2, 0) per observation def TripletLoss(f1, f2, f3): margin = 1 loss = nd.sum((f1-f2)**2 - (f1-f3)**2, axis=1) + 1 loss = nd.maximum(loss, nd.zeros_like(loss)) return loss loss = TripletLoss #with ag.record(): # start recording # theloss = loss(output, nd.ones_like(output)) #theloss.backward() # and compute the gradient #plt.plot(output.asnumpy(), theloss.asnumpy(), label='Huber Loss') #plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient') #plt.legend() #plt.show() f1 = nd.random_normal(shape=(5,10)) f2 = nd.random_normal(shape=(5,10)) f3 = nd.random_normal(shape=(5,10)) theloss = loss(f1, f2, f3) print(theloss) # - # # Classification # ## Logistic Regression # # Next consider the case where we have two labels, say ``cat`` and ``dog``. Since statisticians (and computers) don't like strings, we simplify this to $y \in \{\pm 1\}$. 
One way of mapping real numbers in $\mathbb{R}$ into class probabilities is to use a sigmoid function. # # $$p(y|f) = \frac{1}{1 + \exp(-y f)} \text{ and hence } -\log p(y|f) = \log(1 + \exp(-y f))$$ # # *Side remark for math nerds:* To keep the term numerically stable we can rewrite it as $-yf + \log(1 + \exp(yf))$ whenever $yf < 0$. The reason for doing this is to avoid exponentiating a large positive number which would trigger a numerical overflow. Combining both expressions we get the following expression: $\log(1 + \exp(-|yf|)) - \delta(yf < 0) \cdot yf$. As we can see, the probabilities converge to 0 and 1 respectively for extreme values of $f$. # + loss = gluon.loss.Logistic() # getting data ready thelabel = nd.ones_like(output) with ag.record(): # start recording theloss = loss(output, thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Logistic loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of logistic loss') # now compute the loss for y=-1 with ag.record(): # start recording theloss = loss(output, -thelabel) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='Logistic loss for y=-1') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of loss for y=-1') plt.legend() plt.show() # - # ## Soft Margin Loss # # Note that the logistic loss isn't the only loss that one might encounter. For instance, in Support Vector Machines we have a soft-margin loss. It is $0$ whenever data is correctly classified with some confidence, say $y f(x) > 1$. Otherwise we impose a linear penalty. In math this amounts to # # $$l(y,f) = \mathrm{max}(0, 1- yf)$$ # # In some cases we want to square this loss function. Quite unsurprisingly, the counterpart to `SoftMargin` is called `SquaredSoftMargin`. 
# +
loss = gluon.loss.SoftMargin()

with ag.record():  # start recording
    theloss = loss(output, thelabel)
theloss.backward()  # and compute the gradient

plt.plot(output.asnumpy(), theloss.asnumpy(), label='Soft margin')
plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient')

# now compute the loss for y=-1
theloss = loss(output, -thelabel)
plt.plot(output.asnumpy(), theloss.asnumpy(), label='Soft margin for y=-1')

plt.legend()
plt.show()
# -

# ## Exponential Loss
#
# In some cases we *really* want to ensure that things are classified correctly, we might replace $\log(1 + \exp(-yf))$ for its exponential counterpart, i.e. $\exp(-yf)$. For instance, AdaBoost can be proven to minimize this loss function when it progressively weighs incorrectly classified data in an exponential way (as an aside, for two loss functions $l_1$ and $l_2$, the gradient $\partial_w l_1(x,f(x))$ and $c \partial_w l_2(x, f(x))$ are identical if $l_1 = c l_2$, hence changing the loss function or reweighting the data are equivalent). No matter, the loss function is available in 'MxNet Gluon' and it implements
#
# $$l(y, f) = \exp(-y f)$$

# +
loss = gluon.loss.Exponential()

# getting data ready
thelabel = nd.ones_like(output)

with ag.record():  # start recording
    theloss = loss(output, thelabel)
theloss.backward()  # and compute the gradient

# labels fixed: this cell plots the exponential loss, not the logistic loss
plt.plot(output.asnumpy(), theloss.asnumpy(), label='Exponential loss')
plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient of exponential loss')
plt.legend()
plt.show()
# -

# ## Langford's VW loss
#
# One of the more unusual loss functions is John Langford's VW style loss. It is essentially a cut variant of Huber's robust loss, and it works by piecing together a linear, quadratic and constant part of a loss function. The benefit of this choice is that its gradient is bounded for significant misclassification, that its gradient vanishes for highly confident classification and that there is a graduation in terms of how poorly classified data is.
We have # # $$l(y,f) = \begin{cases} # 0 & \text{ if } 1 < y f \\ # \frac{1}{2} f^2 & \text { if } 0 \leq yf \leq 1 \\ # \frac{1}{2}-yf & \text{ otherwise} # \end{cases}$$ # + loss = gluon.loss.Langford() with ag.record(): # start recording theloss = loss(output, nd.ones_like(output)) theloss.backward() # and compute the gradient plt.plot(output.asnumpy(), theloss.asnumpy(), label='VW style loss') plt.plot(output.asnumpy(), output.grad.asnumpy(), label='Gradient') # now compute the loss for y=-1 theloss = loss(output, -thelabel) plt.plot(output.asnumpy(), theloss.asnumpy(), label='VW style loss for y=-1') plt.legend() plt.show() # - # # Multiclass Classification # ## Multiclass Softmax # # One way of dealing with multiple classes is to turn it into $n$ binary classification problems. That is, we simply test: 'is it class 1', 'is it class 2', ... 'is it class n'. In theory this sounds like a splendid idea. After all, this should be just as easy as determining which class it is. Unfortunately, that's not quite the case. Imagine the situation where none of the $n$ classifiers wants to take responsibility. Or imagine the case where more than one claims that it's his turn. Obviously there has to be a better way. Indeed, there is. # # If we have a vector $f \in \mathbb{R}^n$ of scores, where the coordinate, say $f_i$ is large whenever we think that the correct class is $i$, then we can map $f$ into a probability vector via # # $$p(y=i|f) \propto \exp(f_i) \text{ and hence } p(y=i|f) = \frac{\exp(f_i)}{\sum_j \exp(f_j)}$$ # # Here the normalization by $\sum_j \exp(f_j)$ is needed such that all the terms sum up to 1. Consequently the negative log-likelihood $-\log p(y|f)$, i.e. 
the quantity that we would want to minimize in this case is given by # # $$-\log p(y=i|f) = \log \left[\sum_{j} \exp(f_j)\right] - f_i$$ # # In ``gluon`` the relevant function is [mxnet.gluon.loss.SoftmaxCrossEntropyLoss](http://mxnet.incubator.apache.org/api/python/gluon/loss.html#mxnet.gluon.loss.SoftmaxCrossEntropyLoss). Let's check that this is correct. # + loss = gluon.loss.SoftmaxCrossEntropyLoss() f = nd.random_normal(shape=(1,10)) y = nd.array([4]) #class 4 is true print('Softmax loss is {}.'.format(loss(f,y).asscalar())) # now compute this by hand p = nd.exp(f) p = p / nd.sum(p) print('Class 4 has negative log-likelihood {}.'.format(-nd.log(p[0,4]).asscalar())) # - # The softmax loss has a rather nice property that is worth pointing out: its gradient is given by the difference between the conditional class probabilities $p(y=i|f)$ and the indicator vector $e_j$. This can be derived via # # $$\partial_{f_i} \log \sum_j \exp(f_j) = \frac{\exp(f_i)}{\sum_j \exp(f_j)} = p(y=i|f)$$ # # Such a result seems to be too good to be true by chance. In fact, it holds for *every* member of a larger family of distributions, called the [Exponential Family](https://en.wikipedia.org/wiki/Exponential_family). More specifically, the derivative of the associated normalization is the expected value of the associated embedding. # ## MaxMargin Loss # # The soft-margin loss function allowed us to distinguish between two classes with a margin of separation. That is, as long as $y f(x) \geq 1$ we incurred no loss, whereas for smaller values of the margin (and for misclassifications) a loss is incurred. The obvious question is how to generalize this to more than two classes. One possibility is to treat things as many binary classification problems, but this is a bad idea, since tie-breaking can be tricky. An alternative is to require that the correct class be recognized with a safe margin relative to all the other classes as follows: $f(x,y) \geq f(x,y') + 1$ for all $y' \neq y$. 
Clearly this would do the trick, and we can design a loss function via # # $$l(y,f) = \mathrm{max}\left[0, \mathrm{max}_{y' \neq y} \left[f(x,y') - f(x,y) + 1\right]\right]$$ # # This looks awkward since we have two nested maxima (the outer one is needed to ensure that we don't get negative values for our loss function). A cleaner (albeit slightly wasteful) way of writing this out is to define some function $\Delta(y,y')$ where $\Delta(y,y') = 1$ if $y \neq y'$ and $\Delta(y,y) = 0$. This is a 0-1 loss. In this case the above equation can be rewritten as: # # $$l(y,f) = \mathrm{max}_{y'} \left[f(x,y') - f(x,y) + \Delta(y,y')\right]$$ # # Note that the function $l$ is convex in $f$ (once upon a time when people were using kernels this was a big deal since it meant that the entire optimization problem was convex ...). More importantly for us here is the fact that we now have a parameter, the loss $\Delta$ and an obvious question is what would happen if we changed it a bit. # Let's take some intuition from the real world. Assume that you're driving on a road with a steep cliff on one side and an incline on the other. # # ![](img/road-cliff.jpg) # # Any sensible driver will try to stay as far away from the cliff while hugging the shoulder that corresponds to the incline. This is the case since mistakes on the incline are much more benign (scratched rims) than those on the steep cliff (likely death). In other words, a good driver will pick a margin between alternatives that is commensurate with the cost of making a mistake. [Taskar, Guestrin and Koller](http://dl.acm.org/citation.cfm?id=2981349) (TKG) in 2003 realized the same thing and decided to make $\Delta$ cost sensitive (they did lots of other things related to dynamic programming). The result is that the very same loss function as above now allows for misclassification-dependent confidence margins. Obviously this is something that we would also want in our machine learning arsenal. Enter `MaxMargin`. 
By default it uses the 0-1 loss above (and it automagically infers the size), but if you provide it with a suitable matrix `delta`, it will use the latter. # # + # plain vanilla loss loss = gluon.loss.MaxMargin() # some classes (4 class problem) label = nd.array([1,3,2]) output = nd.random_normal(shape=(3,4)) print('Function values for 3 problems {}'.format(output)) theloss = loss(output, label) print('Loss function values {}'.format(theloss)) print('Instantiated loss matrix {}'.format(loss._delta)) # now make things more interesting by changing the loss matrix delta = nd.array(loss._delta) #call copy constructor delta[0,3] = 4 delta[1,3] = 4 delta[2,3] = 4 loss = gluon.loss.MaxMargin(delta) print('Instantiated loss matrix {}'.format(loss._delta)) print('Function values for 3 problems {}'.format(output)) theloss = loss(output, label) print('Loss function values {}'.format(theloss)) # - # # Information Theory Primer # # ## Entropy # # Sometimes we care about probabilities rather than just labels. In particular, we might want to measure the distance between distributions. For that we need some basics about probabilities, such as the [entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)). In a nutshell, the entropy of a random variable is the amount of surprise we encounter each time we sample from it. For instance, the entropy of the constant function is zero, since we already know what's coming. The entropy of a fair coin being tossed is 1 bit. We have no idea what's going to happen (it's a fair coin after all) and there are only two possible outcomes. If we had a biased coin, e.g. one that produces heads with probability 0.9 and tails with probability 0.1, the surprise would be less (after all, most of the time we see a head). Correspondingly its entropy should be lower. On the other hand, a dice with 6 possible outcomes should have a higher degree of surprise. 
Without further ado, let's define the entropy function: # # $$H[p] := \sum_x -p(x) \log p(x)$$ # # This works well for discrete outcomes. For densities we use # # $$H[p] := \int -p(x) \log p(x) dx$$ # # We can check that for a fair coin the entropy is given by $H[p] = -2 \cdot 0.5 \log 0.5 = \log 2$. Information theorists often measure the information in 'nats' rather than bits. The difference is the base of the logarithm. It's easy to convert: 1 nat is $\log_2 e \approx 1.44$ bit. More generally, for a uniform distribution over $N$ outcomes it is $H[p] = \log N$. One of the fundamental theorems in information theory is that for a distribution $p$, we need at least $H[p]$ nats to encode it. # # There are a number of useful properties for the entropy that are employed in machine learning: # # * Often when estimating distributions we want to find the one with the largest entropy that fits the requirements. That's in line with our desire to restrict our estimates as little as possible beyond what we actually observe. # * The entropy is a concave function. That is, for two distributions $p$ and $q$, the mixture of both has higher entropy: $H[\lambda p + (1-\lambda) q] \geq \lambda H[p] + (1-\lambda) H[q]$. To prove this, simply note that the function $-x \log x$ is concave. # * When we have independent random variables, say $x$ and $y$, then the entropy of the joint distribution is the sum of the individual entropies. This follows simply from the fact that $\log p(x) q(y) = \log p(x) + \log q(y)$. # * For dependent random variables the joint entropy is lower than that of the individual terms. 
This can be seen as follows: # # $$\begin{eqnarray} # H[p(x,y)] = & \int -p(x,y) \log p(x,y) \\ # = & \int -p(x,y) [\log p(x) p(y)] dx dy + \int p(x,y) \log \frac{p(x) p(y)}{p(x,y)} dx dy \\ # \leq & H[p(x)] + H[p(y)] + \log \int p(x,y) \frac{p(x) p(y)}{p(x,y)} dx dy \\ # = & H[p(x)] + H[p(y)] # \end{eqnarray}$$ # # Here the inequality follows from the fact that $\log x$ is a concave function, hence the expectation of the logarithm is less than the logarithm of the expectation. Intuitively this result is straightforward - if $x$ and $y$ are dependent on each other, then knowing $x$ should tell us some more about $y$. Therefore, the joint entropy of $x$ and $y$ should be lower than the sum of the individual entropies. # # This leads us to the notion of mutual information. It is given by the difference between joint and and independent entropies, i.e. $I(x,y) := H[p(x)] + H[p(y)] - H[p(x,y)]$. Basically it's the amount of information that we save. For instance, a light switch and a (functioning) light bulb are strongly correlated - knowing one tells us all about the other. The entropy of the joint is 1 bit (if it's on with probability 0.5), but the sum of the entropies of switch and bulb individually is 2 bit. Hence the mutual information is 1 bit. # # ## Kullback Leibler Divergence # # This brings us to the KL divergence. It measures how close two distributions are. One way of defining such a quantity is to ask how many extra bits one would have to spend to encode data drawn from $p$ when using a code tuned for $q$. If we assume for a fact that it takes $-\log p(x)$ nat to optimally encode $x$, then the penalty from using the 'wrong' code is given by # # $$D(p\|q) = \sum_x p(x) [\log p(x) - \log q(x)]$$ # # For densities the quantity is defined analogously, i.e. $\int p(x) [\log p(x) - \log q(x)] dx$. The first thing to prove is that this is actually a distance. For that we need to show that $D(p\|q) \geq 0$ with equality only for $p=q$. 
To see the latter, simply plug $p=q$ into the definition. To see the former, we rewrite $D$ in the same way as above, using convexity, this time of $-\log x$. # # $$D(p\|q) = \sum_x -p(x) \log \frac{q(x)}{p(x)} \geq -\log \sum_x p(x) \frac{q(x)}{p(x)} = 0$$ # # As an aside, to see that $H[p]$ can be achieved, indeed, quantize all $p(x)$ into bins of the next largest fraction of $2$, e.g. $0.2$ goes into the bin of $\frac{1}{4}$. It is clear that the sum over those bins is no smaller than $1$ and no larger than $2$. Moreover, we can arrange them into a tree, where at level $l$ the bins are of size $2^{1-l}$. Then we simply index these bins according to their position of the tree. Each $x$ will require $\lceil \log_2 p(x) \rceil$ bits (whatever is left over, we simply discard). In sum this is no more than $\log_2 H[p] + 1$. To tighten the bound, simply send $N$ symbols. Since they can be encoded using at most $N \log_2 H[p] + 1$ bit, the code becomes increasingly efficient with only $1/N$ waste. This proves that such a code can be found. That it's impossible to do any better is a consequence of $D(p\|q) \geq 0$. # Note that our construction relied on very long codes for efficiency. This is a real problem in practice. [Turbo codes (https://en.wikipedia.org/wiki/Turbo_code) are one of the techniques to address this, e.g. for mobile communications. # # After this long detour, let's finally get to the KL divergence as a loss function. It generalizes the multiclass softmax as follows: instead of having just a single possible true class, it uses a probability distribution as reference. That is # # $$\log(f,y) = \sum_i y_i (log(y_i) - f_i)$$ # # Here $f_i$ is assume to be a probability distribution (or we can set a flag to transform the output into one beforehand). 
# + loss = gluon.loss.KLDivLoss() # generate some random probability distribution f = nd.random_normal(shape=(1,10)) p = nd.exp(f) p = p / nd.sum(p) # generate some target distribution y = nd.random_normal(shape=(1,10)) y = nd.exp(y) y = y / nd.sum(y) z = nd.zeros_like(y) z[0,3] = 1 # distance between our estimate p and the 'true' distribution y print(loss(nd.log(p), y)) # distance to itself - should be zero print(loss(nd.log(p), p)) # equivalent of logistic loss with class 3 up to normalization over domain, i.e. 1/10 # note that this is VERY DIFFERENT from information theory but a traditional choice # in deep learning print(loss(nd.log(p), z)) print(-nd.log(p[0,3])) # - # ## KL Divergence Estimator # # Loss functions can also be used for other purposes, such as estimating interesting properties about a distribution. For instance, we might want to *estimate* the KL divergence between two distributions directly. Unfortunately this is difficult, since it requires density estimation and even the ratio between two densities $p(x)/q(x)$ to begin with. A rather neat trick was suggested by [<NAME> and Jordan](http://dept.stat.lsa.umich.edu/~xuanlong/Papers/Nguyen-Wainwright-Jordan-aos09.pdf) (NWJ) in 2009 when they realized that convex duality can be used to estimate such quantities rather directly. Before we dive in, we briefly need to explain what the Fenchel-Legendre dual of a function is: # # $$F^*(z) = \mathrm{sup}_x x^\top z - F(x)$$ # # $f^*$ basically compares a line with slope $z$ to the function $F$ and measures the smallest distance to that line. It has the neat property that its dual is the function itself, i.e. $F^{**} = F$, provided that the function is convex and well-behaved. NWJ used this to derive estimators of the [F-divergence](https://en.wikipedia.org/wiki/F-divergence) between distributions. 
The latter is defined as # # $$D_F(p\|q) := \int dq(x) F\left(\frac{p(x)}{q(x)}\right)$$ # # Plugging in duality, we can rewrite this as an optimization problem in terms of $F^*$ (remember, the dual of the dual is the original function, at least for well-behaved convex ones). That is, we obtain # # $$\begin{eqnarray} # D_F(p\|q) = & \int dq(x) \sup_G \left[\frac{p(x)}{q(x)} G(x) - F^*(G(x))\right] \\ # =& \sup_G \left[\int dp(x) G(x) - \int dq(x) F^*(G(x))\right] # \end{eqnarray}$$ # # Skipping over details of when and whether this is possible in general, we now have the difference in expectations over two different function - $G(x)$ and $F^*(G(x))$ for two different distributions, namely $p$ and $q$. These can be replaced by empirical estimates (aka sample averages) and it now looks very much like a classification problem, albeit with a weird kind of loss function. In particular, the KL-divergence has $F(x) = x \log x$. After quite some algebra, and a substitution due to the fact that $G$ needs to be non-negative, which lets us set $G(x) = \exp(g(x))$ we arrive at the following problem: # # $$D_F(p\|q) = \sup_g \left[\int dp(x) \exp(g(x)) - \int dq(x) (g(x) + 1)\right]$$ # # This looks just like a classification problem with a weird loss function and with $p$ and $q$ substituted for classes $-1$ and $1$. The 'loss function' `DualKL` in `Gluon` provides this functionality. # # + # we broke the output data previously output = nd.arange(-5,5,0.01) output.attach_grad() # we need the gradient loss = gluon.loss.DualKL() lossp = loss(output, -nd.ones_like(output)) lossq = loss(output, nd.ones_like(output)) plt.plot(output.asnumpy(), lossp.asnumpy(), label='Loss for p') plt.plot(output.asnumpy(), lossq.asnumpy(), label='Loss for q') plt.legend() plt.show() # - # ## Relative Novelty Detection # # In some cases estimating a density or even a ratio of densities is not really what we want. 
Instead, we would like to find the *most typical* or the *most unusual* observation in a dataset. Unfortunately, these things are not really well defined. Before going into measure theory, we need some culinary support. # # |![](img/doughnut.jpg)|![](img/berliner.jpg)| # |:---------------:|:---------------:| # |Doughnut|Jelly Doughnut| # # Now imagine that we have two different pastry-shaped distributions - a doughnut shaped one and one that looks like a jelly doughnut (also called 'Berliner' in Europe). These two couldn't be more different from each other. Any data occurring in the donut hole (or far away in its periphery) is novel, whereas for the jelly doughnut only the data far away is novel. Yet we can transform one into the other, simply by messing with the radius in polar coordinates, e.g. via a new radial coordinate $r' = 1/r$. Hence, what once was novel is now no longer novel since we stretched out the center of the poor jelly doughnut so much that its density becomes infinitesimally low. In mathematics terms, this means that novelty is sensitive to the *measure* of the domain where it's defined. This is bad, since we usually don't know this measure. For a 3D space, there are still assumptions of what is considered reasonable (stretching out a poor jelly doughnut probably is not). But for arbitrary domains (database records, books, images, movies, TCP/IP logs) it's pretty hard to define what is reasonable. However, we all know that something that looks just like what we've seen before is probably reasonable ... but that's just like defining what is novely by saying that it's novel if it looks novel. Ouch! # # Here's a mathematically more sound way: we use data to define an implicit reference measure. E.g. for server logs we could use past data as a reference measure, such that we can ask the question whether something looks out of order relative to what we've seen in the past. Or for images, whether there's one that stands out relative to past images. 
Or even for pixels within an image. Mathematically this means that we care mostly about $p(x)/q(x)$ whenever $p(x)/q(x)$ is particularly small. For large ratios things are just fine. This is precisely what [<NAME> and Teo](http://proceedings.mlr.press/v5/smola09a/smola09a.pdf) (SLT) in 2009 did in their Relative Novelty Detection paper. They used the same reasoning as NGW but with a different F-divergence function: # # $$F\left(\frac{p(x)}{q(x)}\right) = \mathrm{max}\left(0, \rho - \log \frac{p(x)}{q(x)}\right)$$ # # Here $\rho$ serves as a threshold to decide whether the density ratio is too low. Anything lower than $\exp(\rho)$ is too small. This actually allows us to focus on both the very typical and the very atypical aspects of the data, simply by picking a very small and a very large $\rho$ respectively. Note that for very large $\rho$ this is just the *reverse KL Divergence*, i.e. pretty much the same thing as what NGW were using. Again, skipping over the tedious mathematical details of computing the dual of $F$ and of substituting (we have the same problem of nonnegativity) we arrive at the following loss function: # # $$l(y,f) = \begin{cases} # \exp(f - \rho) & \text{ if } y = -1 \\ # -f-1 & \text{ if } y = 1 \text{ and } f > 0 \\ # \exp(f) & \text{ if } y = 1 \text{ and } f <= 0 # \end{cases}$$ # # 'Training' with this loss function will give us precisely the relative novelty detector that we want. All we need to do now is threshold it at $\rho$ to get the desired output. # + loss = gluon.loss.RelativeNovelty(rho=3) lossp = loss(output, -nd.ones_like(output)) lossq = loss(output, nd.ones_like(output)) plt.plot(output.asnumpy(), lossp.asnumpy(), label='Loss for p') plt.plot(output.asnumpy(), lossq.asnumpy(), label='Loss for q') plt.legend() plt.show() # - # # Exotic Losses # # There are many loss functions that do not fit into the categories of classification and regression. 
In fact, there are some recent papers that argue that we should do away with loss functions entirely, such as the one by [<NAME>, <NAME> Efros](https://arxiv.org/abs/1611.07004) from 2016. That said, there are quite a few useful loss functions that are in use. # # ## Triplet Loss # # Assume that we want to embed data into a vector space. For instance, assume that we want to find embeddings of faces such that faces of the same person are grouped closely together whereas faces of different people are distant. In math: we want $\|f_a - f_{a'}\|$ to be small for $a$ and $a'$ drawn from the same class, whereas we want $\|f_a - f_b\|$ to be large whenever $a$ and $b$ are from different classes. However, this doesn't really tell us *how small* and *how large* we'd really like these distances to be. There is an easy fix - all we need to do is to enforce that the distances are *relatively large*, namely by some constant $c > 0$. # # $$\|f_a - f_{a'}\| + c \leq \|f_a - f_b\|$$ # # Now we can use the same trick as for soft-margin losses and turn this into a loss function by taking the maximum over the inequality. One last trick is to square the distances such that gradients look nice and we have the triplet loss: # # $$l(f_a, f_{a'}, f_b) = \mathrm{max}(0, c + \|f_a - f_{a'}\|^2 - \|f_a - f_b\|^2)$$ # # Quite unsurprisingly, this is invoked via the `TripletLoss` class. Its constructor lets us adjust the margin $c$ by which data should be separated. Let's generate some data. # + loss = gluon.loss.TripletLoss(margin=2) # make some data. 
f1 and f2 are similar, f3 is (hopefully) far away theshape = (5,3) f1 = nd.normal(shape=theshape) f2 = nd.normal(shape=theshape) f3 = nd.normal(shape=theshape) * 5.0 # with the right pair of distances theloss = loss(f1, f2, f3) print(theloss) # these are likely far away in the wrong way, since we blew f3 out of proportions theloss = loss(f1, f3, f2) print(theloss) # - # Todo - many hierarchical losses, pairwise loss with margin, minimum error decoder, etc.
proto-P02-C02.6-loss.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyTorch Assignment: Multi-Layer Perceptron (MLP) # **[Duke Community Standard](http://integrity.duke.edu/standard.html): By typing your name below, you are certifying that you have adhered to the Duke Community Standard in completing this assignment.** # # Name: # ### Multi-Layer Perceptrons # # The simple logistic regression example we went over in the previous notebook is essentially a one-layer neural network, projecting straight from the input to the output predictions. # While this can be effective for linearly separable data, occasionally a little more complexity is necessary. # Neural networks with additional layers are typically able to learn more complex functions, leading to better performance. # These additional layers (called "hidden" layers) transform the input into one or more intermediate representations before making a final prediction. # # In the logistic regression example, the way we performed the transformation was with a fully-connected layer, which consisted of a linear transform (matrix multiply plus a bias). # A neural network consisting of multiple successive fully-connected layers is commonly called a Multi-Layer Perceptron (MLP). # In the simple MLP below, a 4-d input is projected to a 5-d hidden representation, which is then projected to a single output that is used to make the final prediction. # # <img src="Figures/MLP.png" width="300"/> # # For the assignment, you will be building an MLP for MNIST. # Mechanically, this is done very similarly to our logistic regression example, but instead of going straight to a 10-d vector representing our output predictions, we might first transform to a 500-d vector with a "hidden" layer, then to the output of dimension 10.
# Before you do so, however, there's one more important thing to consider. # # ### Nonlinearities # # We typically include nonlinearities between layers of a neural network. # There's a number of reasons to do so. # For one, without anything nonlinear between them, successive linear transforms (fully connected layers) collapse into a single linear transform, which means the model isn't any more expressive than a single layer. # On the other hand, intermediate nonlinearities prevent this collapse, allowing neural networks to approximate more complex functions. # # There are a number of nonlinearities commonly used in neural networks, but one of the most popular is the [rectified linear unit (ReLU)](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)): # # \begin{align} # x = \max(0,x) # \end{align} # # There are a number of ways to implement this in PyTorch. # We could do it with elementary PyTorch operations: # + import torch x = torch.rand(5, 3)*2 - 1 x_relu_max = torch.max(torch.zeros_like(x),x) print("x: {}".format(x)) print("x after ReLU with max: {}".format(x_relu_max)) # - # Of course, PyTorch also has the ReLU implemented, for example in `torch.nn.functional`: # + import torch.nn.functional as F x_relu_F = F.relu(x) print("x after ReLU with nn.functional: {}".format(x_relu_F)) # - # Same result. # ### Assignment # # Build a 2-layer MLP for MNIST digit classfication. Feel free to play around with the model architecture and see how the training time/performance changes, but to begin, try the following: # # Image (784 dimensions) -> # fully connected layer (500 hidden units) -> nonlinearity (ReLU) -> # fully connected (10 hidden units) -> softmax # # Try building the model both with basic PyTorch operations, and then again with more object-oriented higher-level APIs. # You should get similar results! # # # *Some hints*: # - Even as we add additional layers, we still only require a single optimizer to learn the parameters. 
# Just make sure to pass all parameters to it! # - As you'll calculate in the Short Answer, this MLP model has many more parameters than the logistic regression example, which makes it more challenging to learn. # To get the best performance, you may want to play with the learning rate and increase the number of training epochs. # - Be careful using `torch.nn.CrossEntropyLoss()`. # If you look at the [PyTorch documentation](https://pytorch.org/docs/stable/nn.html#crossentropyloss): you'll see that `torch.nn.CrossEntropyLoss()` combines the softmax operation with the cross-entropy. # This means you need to pass in the logits (predictions pre-softmax) to this loss. # Computing the softmax separately and feeding the result into `torch.nn.CrossEntropyLoss()` will significantly degrade your model's performance! # + ### YOUR CODE HERE # Make sure to print out your accuracy on the test set at the end. # - # ### Short answer # How many trainable parameters does your model have? # How does this compare to the logistic regression example? # `[Your answer here]`
Duke_DL/notebook/2B_MultiLayer_Perceptron_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="hzj-9uW4qDay" # # Neuromatch Academy: Week1, Day 2, Tutorial 2 # + [markdown] colab_type="text" id="33XOc4X_qDbL" # #Tutorial objectives # # We are investigating a simple phenomena, working through the 10 steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)) in two notebooks: # # **Framing the question** # 1. finding a phenomenon and a question to ask about it # 2. understanding the state of the art # 3. determining the basic ingredients # 4. formulating specific, mathematically defined hypotheses # **Implementing the model** # 5. selecting the toolkit # 6. planning the model # 7. implementing the model # **Model testing** # 8. completing the model # 9. testing and evaluating the model # **Publishing** # 10. publishing models # # We did steps 1-5 in Tutorial 1 and will cover steps 6-10 in Tutorial 2 (this notebook). # + [markdown] colab_type="text" id="EbVRzqZBR17L" # # Utilities Setup and Convenience Functions # # Please run the following **3** chunks to have functions and data available. 
# + cellView="form" colab={} colab_type="code" id="oKP9npTbqDbU" #@title Utilities and setup # set up the environment for this tutorial import time # import time import numpy as np # import numpy import scipy as sp # import scipy from scipy.stats import gamma # import gamma distribution import math # import basic math functions import random # import basic random number generator functions import matplotlib.pyplot as plt # import matplotlib from IPython import display fig_w, fig_h = (12, 8) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) plt.style.use('ggplot') # %matplotlib inline # #%config InlineBackend.figure_format = 'retina' from scipy.signal import medfilt # make # + cellView="form" colab={} colab_type="code" id="-OC-H0O0qDbj" #@title Convenience functions: Plotting and Filtering # define some convenience functions to be used later def my_moving_window(x, window=3, FUN=np.mean): ''' Calculates a moving estimate for a signal Args: x (numpy.ndarray): a vector array of size N window (int): size of the window, must be a positive integer FUN (function): the function to apply to the samples in the window Returns: (numpy.ndarray): a vector array of size N, containing the moving average of x, calculated with a window of size window There are smarter and faster solutions (e.g. using convolution) but this function shows what the output really means. This function skips NaNs, and should not be susceptible to edge effects: it will simply use all the available samples, which means that close to the edges of the signal or close to NaNs, the output will just be based on fewer samples. By default, this function will apply a mean to the samples in the window, but this can be changed to be a max/min/median or other function that returns a single numeric value based on a sequence of values. 
''' # if data is a matrix, apply filter to each row: if len(x.shape) == 2: output = np.zeros(x.shape) for rown in range(x.shape[0]): output[rown,:] = my_moving_window(x[rown,:],window=window,FUN=FUN) return output # make output array of the same size as x: output = np.zeros(x.size) # loop through the signal in x for samp_i in range(x.size): values = [] # loop through the window: for wind_i in range(int(-window), 1): if ((samp_i+wind_i) < 0) or (samp_i+wind_i) > (x.size - 1): # out of range continue # sample is in range and not nan, use it: if not(np.isnan(x[samp_i+wind_i])): values += [x[samp_i+wind_i]] # calculate the mean in the window for this point in the output: output[samp_i] = FUN(values) return output def my_plot_percepts(datasets=None, plotconditions=False): if isinstance(datasets,dict): # try to plot the datasets # they should be named... # 'expectations', 'judgments', 'predictions' fig = plt.figure(figsize=(8, 8)) # set aspect ratio = 1? not really plt.ylabel('perceived self motion [m/s]') plt.xlabel('perceived world motion [m/s]') plt.title('perceived velocities') # loop through the entries in datasets # plot them in the appropriate way for k in datasets.keys(): if k == 'expectations': expect = datasets[k] plt.scatter(expect['world'],expect['self'],marker='*',color='xkcd:green',label='my expectations') elif k == 'judgments': judgments = datasets[k] for condition in np.unique(judgments[:,0]): c_idx = np.where(judgments[:,0] == condition)[0] cond_self_motion = judgments[c_idx[0],1] cond_world_motion = judgments[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'world-motion condition judgments' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'self-motion condition judgments' else: c_label = 'condition [%d] judgments'%condition plt.scatter(judgments[c_idx,3],judgments[c_idx,4], label=c_label, alpha=0.2) elif k == 'predictions': predictions = datasets[k] for condition in np.unique(predictions[:,0]): c_idx = 
np.where(predictions[:,0] == condition)[0] cond_self_motion = predictions[c_idx[0],1] cond_world_motion = predictions[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'predicted world-motion condition' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'predicted self-motion condition' else: c_label = 'condition [%d] prediction'%condition plt.scatter(predictions[c_idx,4],predictions[c_idx,3], marker='x', label=c_label) else: print("datasets keys should be 'hypothesis', 'judgments' and 'predictions'") if plotconditions: # this code is simplified but only works for the dataset we have: plt.scatter([1],[0],marker='<',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='world-motion stimulus',s=80) plt.scatter([0],[1],marker='>',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='self-motion stimulus',s=80) plt.legend(facecolor='xkcd:white') plt.show() else: if datasets is not None: print('datasets argument should be a dict') raise TypeError def my_plot_motion_signals(): dt = 1/10 a = gamma.pdf( np.arange(0,10,dt), 2.5, 0 ) t = np.arange(0,10,dt) v = np.cumsum(a*dt) fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(14,6)) fig.suptitle('Sensory ground truth') ax1.set_title('world-motion condition') ax1.plot(t,-v,label='visual [$m/s$]') ax1.plot(t,np.zeros(a.size),label='vestibular [$m/s^2$]') ax1.set_xlabel('time [s]') ax1.set_ylabel('motion') ax1.legend(facecolor='xkcd:white') ax2.set_title('self-motion condition') ax2.plot(t,-v,label='visual [$m/s$]') ax2.plot(t,a,label='vestibular [$m/s^2$]') ax2.set_xlabel('time [s]') ax2.set_ylabel('motion') ax2.legend(facecolor='xkcd:white') plt.show() def my_plot_sensorysignals(judgments, opticflow, vestibular, returnaxes=False, addaverages=False): wm_idx = np.where(judgments[:,0] == 0) sm_idx = np.where(judgments[:,0] == 1) opticflow = opticflow.transpose() wm_opticflow = np.squeeze(opticflow[:,wm_idx]) sm_opticflow = 
np.squeeze(opticflow[:,sm_idx]) vestibular = vestibular.transpose() wm_vestibular = np.squeeze(vestibular[:,wm_idx]) sm_vestibular = np.squeeze(vestibular[:,sm_idx]) X = np.arange(0,10,.1) fig, my_axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(15,10)) fig.suptitle('Sensory signals') my_axes[0][0].plot(X,wm_opticflow, color='xkcd:light red', alpha=0.1) my_axes[0][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][0].plot(X,np.average(wm_opticflow, axis=1), color='xkcd:red', alpha=1) my_axes[0][0].set_title('world-motion optic flow') my_axes[0][0].set_ylabel('[motion]') my_axes[0][1].plot(X,sm_opticflow, color='xkcd:azure', alpha=0.1) my_axes[0][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][1].plot(X,np.average(sm_opticflow, axis=1), color='xkcd:blue', alpha=1) my_axes[0][1].set_title('self-motion optic flow') my_axes[1][0].plot(X,wm_vestibular, color='xkcd:light red', alpha=0.1) my_axes[1][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][0].plot(X,np.average(wm_vestibular, axis=1), color='xkcd:red', alpha=1) my_axes[1][0].set_title('world-motion vestibular signal') my_axes[1][0].set_xlabel('time [s]') my_axes[1][0].set_ylabel('[motion]') my_axes[1][1].plot(X,sm_vestibular, color='xkcd:azure', alpha=0.1) my_axes[1][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][1].plot(X,np.average(sm_vestibular, axis=1), color='xkcd:blue', alpha=1) my_axes[1][1].set_title('self-motion vestibular signal') my_axes[1][1].set_xlabel('time [s]') if returnaxes: return my_axes else: plt.show() def my_plot_thresholds(thresholds, world_prop, self_prop, prop_correct): plt.figure(figsize=(12,8)) plt.title('threshold effects') plt.plot([min(thresholds),max(thresholds)],[0,0],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[0.5,0.5],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[1,1],':',color='xkcd:black') 
plt.plot(thresholds, world_prop, label='world motion') plt.plot(thresholds, self_prop, label='self motion') plt.plot(thresholds, prop_correct, color='xkcd:purple', label='correct classification') plt.xlabel('threshold') plt.ylabel('proportion correct or classified as self motion') plt.legend(facecolor='xkcd:white') plt.show() def my_plot_predictions_data(judgments, predictions): conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # self: conditions_self = np.abs(judgments[:,1]) veljudgmnt_self = judgments[:,3] velpredict_self = predictions[:,3] # world: conditions_world = np.abs(judgments[:,2]) veljudgmnt_world = judgments[:,4] velpredict_world = predictions[:,4] fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharey='row', figsize=(12,5)) ax1.scatter(veljudgmnt_self,velpredict_self, alpha=0.2) ax1.plot([0,1],[0,1],':',color='xkcd:black') ax1.set_title('self-motion judgments') ax1.set_xlabel('observed') ax1.set_ylabel('predicted') ax2.scatter(veljudgmnt_world,velpredict_world, alpha=0.2) ax2.plot([0,1],[0,1],':',color='xkcd:black') ax2.set_title('world-motion judgments') ax2.set_xlabel('observed') ax2.set_ylabel('predicted') plt.show() # + cellView="form" colab={} colab_type="code" id="GJG0pGXCqDbu" #@title Data generation code (needs to go on OSF and deleted here) def my_simulate_data(repetitions=100, conditions=[(0,-1),(+1,0)] ): """ Generate simulated data for this tutorial. You do not need to run this yourself. 
Args: repetitions: (int) number of repetitions of each condition (default: 30) conditions: list of 2-tuples of floats, indicating the self velocity and world velocity in each condition (default: returns data that is good for exploration: [(-1,0),(0,+1)] but can be flexibly extended) The total number of trials used (ntrials) is equal to: repetitions * len(conditions) Returns: dict with three entries: 'judgments': ntrials * 5 matrix 'opticflow': ntrials * 100 matrix 'vestibular': ntrials * 100 matrix The default settings would result in data where first 30 trials reflect a situation where the world (other train) moves in one direction, supposedly at 1 m/s (perhaps to the left: -1) while the participant does not move at all (0), and 30 trials from a second condition, where the world does not move, while the participant moves with 1 m/s in the opposite direction from where the world is moving in the first condition (0,+1). The optic flow should be the same, but the vestibular input is not. """ # reproducible output np.random.seed(1937) # set up some variables: ntrials = repetitions * len(conditions) # the following arrays will contain the simulated data: judgments = np.empty(shape=(ntrials,5)) opticflow = np.empty(shape=(ntrials,100)) vestibular = np.empty(shape=(ntrials,100)) # acceleration: a = gamma.pdf(np.arange(0,10,.1), 2.5, 0 ) # divide by 10 so that velocity scales from 0 to 1 (m/s) # max acceleration ~ .308 m/s^2 # not realistic! 
should be about 1/10 of that # velocity: v = np.cumsum(a*.1) # position: (not necessary) #x = np.cumsum(v) ################################# # REMOVE ARBITRARY SCALING & CORRECT NOISE PARAMETERS vest_amp = 1 optf_amp = 1 # we start at the first trial: trialN = 0 # we start with only a single velocity, but it should be possible to extend this for conditionno in range(len(conditions)): condition = conditions[conditionno] for repetition in range(repetitions): # # generate optic flow signal OF = v * np.diff(condition) # optic flow: difference between self & world motion OF = (OF * optf_amp) # fairly large spike range OF = OF + (np.random.randn(len(OF)) * .1) # adding noise # generate vestibular signal VS = a * condition[0] # vestibular signal: only self motion VS = (VS * vest_amp) # less range VS = VS + (np.random.randn(len(VS)) * 1.) # acceleration is a smaller signal, what is a good noise level? # store in matrices, corrected for sign #opticflow[trialN,:] = OF * -1 if (np.sign(np.diff(condition)) < 0) else OF #vestibular[trialN,:] = VS * -1 if (np.sign(condition[1]) < 0) else VS opticflow[trialN,:], vestibular[trialN,:] = OF, VS ######################################################### # store conditions in judgments matrix: judgments[trialN,0:3] = [ conditionno, condition[0], condition[1] ] # vestibular SD: 1.0916052957046194 and 0.9112684509277528 # visual SD: 0.10228834313079663 and 0.10975472557444346 # generate judgments: if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,5)*.1)[70:90])) < 1): ########################### # NO self motion detected ########################### selfmotion_weights = np.array([.01,.01]) # there should be low/no self motion worldmotion_weights = np.array([.01,.99]) # world motion is dictated by optic flow else: ######################## # self motion DETECTED ######################## #if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,15)*.1)[70:90]) - np.average(medfilt(OF,15)[70:90])) < 5): if True: #################### # explain all self 
motion by optic flow selfmotion_weights = np.array([.01,.99]) # there should be lots of self motion, but determined by optic flow worldmotion_weights = np.array([.01,.01]) # very low world motion? else: # we use both optic flow and vestibular info to explain both selfmotion_weights = np.array([ 1, 0]) # motion, but determined by vestibular signal worldmotion_weights = np.array([ 1, 1]) # very low world motion? # integrated_signals = np.array([ np.average( np.cumsum(medfilt(VS/vest_amp,15))[90:100]*.1 ), np.average((medfilt(OF/optf_amp,15))[90:100]) ]) selfmotion = np.sum(integrated_signals * selfmotion_weights) worldmotion = np.sum(integrated_signals * worldmotion_weights) #print(worldmotion,selfmotion) judgments[trialN,3] = abs(selfmotion) judgments[trialN,4] = abs(worldmotion) # this ends the trial loop, so we increment the counter: trialN += 1 return {'judgments':judgments, 'opticflow':opticflow, 'vestibular':vestibular} simulated_data = my_simulate_data() judgments = simulated_data['judgments'] opticflow = simulated_data['opticflow'] vestibular = simulated_data['vestibular'] # + [markdown] colab_type="text" id="2ZS7z43AD4Cz" # #Micro-tutorial 6 - planning the model # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="yBuLADkkDHZx" outputId="57603faa-07c0-4f55-f4eb-7ae7e78e003c" #@title Video: Planning the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='daEtkVporBE', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="WswMF3hala0x" # # ###**Goal:** Identify the key components of the model and how they work together. # # Our goal all along has been to model our perceptual estimates of sensory data. # Now that we have some idea of what we want to do, we need to line up the components of the model: what are the input and output? Which computations are done and in what order? 
# # The figure below shows a generic model we will use to guide our code construction. # ![Model as code](https://i.ibb.co/hZdHmkk/modelfigure.jpg) # # Our model will have: # * **inputs**: the values the system has available - for this tutorial the sensory information in a trial. We want to gather these together and plan how to process them. # * **parameters**: unless we are lucky, our functions will have unknown parameters - we want to identify these and plan for them. # * **outputs**: these are the predictions our model will make - for this tutorial these are the perceptual judgments on each trial. Ideally these are directly comparable to our data. # * **Model functions**: A set of functions that perform the hypothesized computations. # # >Using Python (with Numpy and Scipy) we will define a set of functions that take our data and some parameters as input, can run our model, and output a prediction for the judgment data. # # #Recap of what we've accomplished so far: # # To model perceptual estimates from our sensory data, we need to # 1. _integrate_ to ensure sensory information are in appropriate units # 2. _reduce noise and set timescale_ by filtering # 3. _threshold_ to model detection # # Remember the kind of operations we identified: # * integration: `np.cumsum()` # * filtering: `my_moving_window()` # * threshold: `if` with a comparison (`>` or `<`) and `else` # # We will collect all the components we've developed and design the code by: # 1. **identifying the key functions** we need # 2. **sketching the operations** needed in each. # # # # + [markdown] colab_type="text" id="1q8NpBn7la02" # **_Planning our model:_** # # We know what we want the model to do, but we need to plan and organize the model into functions and operations. # # We're providing a draft of the first function. # # For each of the two other code chunks, write mostly comments and help text first. 
# This should put into words what role each of the functions plays in the overall model, implementing one of the steps decided above.
#
# _______
# Below is the main function with a detailed explanation of what the function is supposed to do: what input is expected, and what output will be generated.
#
# The code is not complete, and only returns nans for now. However, this outlines how most model code works: it gets some measured data (the sensory signals) and a set of parameters as input, and as output returns a prediction on other measured data (the velocity judgments).
#
# The goal of this function is to define the top level of a simulation model which:
# * receives all input
# * loops through the cases
# * calls functions that compute predicted values for each case
# * outputs the predictions

# + [markdown] colab_type="text" id="lEbkf-gbQVLy"
# ### **TD 6.1**: Complete main model function
#
# The function `my_train_illusion_model()` below should call one other function: `my_perceived_motion()`. What input do you think this function should get?

# + [markdown] colab_type="text" id="AqzBfQeISJGH"
# **Complete main model function**

# + cellView="both" colab={} colab_type="code" id="vjZfSZA5la06"
def my_train_illusion_model(sensorydata, params):
    '''
    Generate output predictions of perceived self-motion and perceived
    world-motion velocity based on input visual and vestibular signals.

    Args (Input variables passed into function):

        sensorydata: (dict) dictionary with two named entries:
            opticflow: (numpy.ndarray of float) NxM array with N trials on
                rows and M visual signal samples in columns
            vestibular: (numpy.ndarray of float) NxM array with N trials on
                rows and M vestibular signal samples in columns

        params: (dict) dictionary with named entries:
            threshold: (float) vestibular threshold for credit assignment
            filterwindow: (list of int) determines the strength of filtering
                for the visual and vestibular signals, respectively
            integrate (bool): whether to integrate the vestibular signals,
                will be set to True if absent
            FUN (function): function used in the filter, will be set to
                np.mean if absent
            samplingrate (float): the number of samples per second in the
                sensory data, will be set to 10 if absent

    Returns:

        dict with two entries:
            selfmotion: (numpy.ndarray) vector array of length N, with
                predictions of perceived self motion
            worldmotion: (numpy.ndarray) vector array of length N, with
                predictions of perceived world motion
    '''

    # sanitize input a little: fill in defaults for the optional keys only.
    # NOTE(review): 'threshold' and 'filterwindow' get no default here, so
    # they are effectively required keys — confirm callers always pass them.
    if not('FUN' in params.keys()):
        params['FUN'] = np.mean
    if not('integrate' in params.keys()):
        params['integrate'] = True
    if not('samplingrate' in params.keys()):
        params['samplingrate'] = 10

    # number of trials: one row per trial in the opticflow matrix
    ntrials = sensorydata['opticflow'].shape[0]

    # set up variables to collect output (one prediction per trial)
    selfmotion = np.empty(ntrials)
    worldmotion = np.empty(ntrials)

    # loop through trials?
    for trialN in range(ntrials):
        # these are our sensory variables (inputs): one trial's time series
        vis = sensorydata['opticflow'][trialN,:]
        ves = sensorydata['vestibular'][trialN,:]

        ########################################################
        # generate output predicted perception:
        ########################################################
        # our inputs are vis, ves, and params
        # placeholder until the student fills in the real call below:
        selfmotion[trialN], worldmotion[trialN] = [np.nan, np.nan]
        ########################################################
        # replace above with
        # selfmotion[trialN], worldmotion[trialN] = my_perceived_motion( ???, ???, params=params)
        # and fill in question marks
        ########################################################

    # comment this out when you've filled in the call above; until then this
    # raise fires on every call and makes the return below unreachable.
    raise NotImplementedError("Student excercise: generate predictions")

    return {'selfmotion':selfmotion, 'worldmotion':worldmotion}

# uncomment the following lines to run the main model function:

## here is a mock version of my_perceived_motion,
## so you can test my_train_illusion_model()
#def my_perceived_motion(*args, **kwargs):
    #return np.random.rand(2)

##let's look at the predictions we generated for two sample trials (0,100)
##we should get a 1x2 vector of self-motion prediction and another for world-motion
#sensorydata={'opticflow':opticflow[[0,100],:0], 'vestibular':vestibular[[0,100],:0]}
#params={'threshold':0.33, 'filterwindow':[100,50]}
#my_train_illusion_model(sensorydata=sensorydata, params=params)

# + [markdown] cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="text" id="jNKehhENytY-" outputId="2f59f49a-5dab-4752-d039-d5b05449ce9c"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_685e0a13.py)
#
#

# + [markdown] colab_type="text" id="r0d72X8xla1I"
# ### **TD 6.2**: Draft perceived motion functions
#
# Now we draft a set of functions, the first of which is used in the main model function (see above) and serves to generate
perceived velocities. The other two are used in the first one. Only write help text and/or comments, you don't have to write the whole function. Each time ask yourself these questions: # # * what sensory data is necessary? # * what other input does the function need, if any? # * which operations are performed on the input? # * what is the output? # # (the number of arguments is correct) # + [markdown] colab_type="text" id="jZ-QNnMaSzUJ" # **Template perceived motion** # + cellView="both" colab={} colab_type="code" id="Le2UGYSxla1L" # fill in the input arguments the function should have: # write the help text for the function: def my_perceived_motion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # structure your code into two functions: "my_selfmotion" and "my_worldmotion" # write comments outlining the operations to be performed on the inputs by each of these functions # use the elements from micro-tutorials 3, 4, and 5 (found in W1D2 Tutorial Part 1) # # # # what kind of output should this function produce? return output # + [markdown] colab_type="text" id="Wjqk_VeVgqDo" # We've completed the `my_perceived_motion()` function for you below. Follow this example to complete the template for `my_selfmotion()` and `my_worldmotion()`. Write out the inputs and outputs, and the steps required to calculate the outputs from the inputs. 
# # **Perceived motion function** # + cellView="both" colab={} colab_type="code" id="A5WroFU-la1U" #Full perceived motion function def my_perceived_motion(vis, ves, params): ''' Takes sensory data and parameters and returns predicted percepts Args: vis (numpy.ndarray): 1xM array of optic flow velocity data ves (numpy.ndarray): 1xM array of vestibular acceleration data params: (dict) dictionary with named entries: see my_train_illusion_model() for details Returns: [list of floats]: prediction for perceived self-motion based on vestibular data, and prediction for perceived world-motion based on perceived self-motion and visual data ''' # estimate self motion based on only the vestibular data # pass on the parameters selfmotion = my_selfmotion(ves=ves, params=params) # estimate the world motion, based on the selfmotion and visual data # pass on the parameters as well worldmotion = my_worldmotion(vis=vis, selfmotion=selfmotion, params=params) return [selfmotion, worldmotion] # + [markdown] colab_type="text" id="TYzRAst6S56u" # **Template calculate self motion** # Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. # + cellView="both" colab={} colab_type="code" id="X5Ab9T5DPXuf" def my_selfmotion(arg1, arg2): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. # 3. # 4. # what output should this function produce? 
return output # + [markdown] cellView="both" colab={} colab_type="text" id="4uB61BGFhpVH" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_181325a9.py) # # # + [markdown] colab_type="text" id="_6JhNTIGTB_o" # **Template calculate world motion** # Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. # + cellView="both" colab={} colab_type="code" id="AzntazB0P15Y" def my_worldmotion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. # 3. # what output should this function produce? return output # + [markdown] cellView="both" colab={} colab_type="text" id="y-hPMkJukHh1" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_8f913582.py) # # # + [markdown] colab_type="text" id="1f2zLMJNGfbb" # #Micro-tutorial 7 - implement model # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="yqKpDcxPEHSP" outputId="7aab5d99-07ae-4470-be41-fbf8008ec53b" #@title Video: implement the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='gtSOekY8jkw', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="_u9ILuKJla1c" # # **Goal:** We write the components of the model in actual code. 
# # For the operations we picked, there are functions ready to use: # * integration: `np.cumsum(data, axis=1)` (axis=1: per trial and over samples) # * filtering: `my_moving_window(data, window)` (window: int, default 3) # * average: `np.mean(data)` # * threshold: if (value > thr): <operation 1> else: <operation 2> # # # + [markdown] colab_type="text" id="omhHWHkXugkw" # ###**TD 7.1:** Write code to estimate self motion # # Use the operations to finish writing the function that will calculate an estimate of self motion. Fill in the descriptive list of items with actual operations. Use the function for estimating world-motion below, which we've filled for you! # # **Template finish self motion function** # + cellView="both" colab={} colab_type="code" id="TdoOVsfpla1e" def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' ###uncomment the code below and fill in with your code ## 1. integrate vestibular signal #ves = np.cumsum(ves*(1/params['samplingrate'])) ## 2. running window function to accumulate evidence: #selfmotion = YOUR CODE HERE ## 3. take final value of self-motion vector as our estimate #selfmotion = ## 4. compare to threshold. Hint: the threshold is stored in params['threshold'] ## if selfmotion is higher than threshold: return value ## if it's lower than threshold: return 0 #if YOUR CODE HERE #selfmotion = YOUR CODE HERE # comment this out when you've filled raise NotImplementedError("Student excercise: estimate my_selfmotion") return output # + [markdown] cellView="both" colab={} colab_type="text" id="p8hLOrMQo_zA" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_3ea16348.py) # # # + [markdown] colab_type="text" id="rDWyfXQyt2Um" # ### Estimate world motion # # We have completed the `my_worldmotion()` function for you. # # **World motion function** # + cellView="both" colab={} colab_type="code" id="3sWacz4At9Ma" # World motion function def my_worldmotion(vis, selfmotion, params): ''' Estimates perceived world motion from the optic flow signal and the self motion estimate Args: vis (numpy.ndarray): 1xM array with the optic flow signal selfmotion (float): estimate of self motion params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of world motion in m/s ''' # running average to smooth/accumulate sensory evidence visualmotion = my_moving_window(vis, window=params['filterwindows'][1], FUN=np.mean) # take final value visualmotion = visualmotion[-1] # 'subtract' the self motion estimate; NOTE(review): the code adds — presumably the optic flow sign convention (opposite in sign to self motion) makes addition the subtraction here; confirm against the solution worldmotion = visualmotion + selfmotion # return final value return worldmotion # + [markdown] colab_type="text" id="CKXdgsGSGh1R" # #Micro-tutorial 8 - completing the model # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="4k9Dnf36EYC-" outputId="dada59b7-466c-4395-a3e4-8726c1011b8c" #@title Video: completing the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='-NiHSv4xCDs', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="Z-JpO3i8la12" # #
**Goal:** Make sure the model can speak to the hypothesis. Eliminate all the parameters that do not speak to the hypothesis. # # Now that we have a working model, we can keep improving it, but at some point we need to decide that it is finished. Once we have a model that displays the properties of a system we are interested in, it should be possible to say something about our hypothesis and question. Keeping the model simple makes it easier to understand the phenomenon and answer the research question. Here that means that our model should have illusory perception, and perhaps make similar judgments to those of the participants, but not much more. # # To test this, we will run the model, store the output and plot the models' perceived self motion over perceived world motion, like we did with the actual perceptual judgments (it even uses the same plotting function). # + [markdown] colab_type="text" id="wPQh7JMW2crO" # ### **TD 8.1:** See if the model produces illusions # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" id="14bGdwxX3mk1" outputId="6c93a2e7-7e01-4e49-e2dd-0fb5d52bf47e" #@title Run to plot model predictions of motion estimates # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.6, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=True) # + [markdown] colab_type="text" id="ughghdXV5bhv" # **Questions:** # # * Why is the data distributed this way? How does it compare to the plot in TD 1.2? # * Did you expect to see this? 
# * Where do the model's predicted judgments for each of the two conditions fall? # * How does this compare to the behavioral data? # # However, the main observation should be that **there are illusions**: the blue and red data points are mixed in each of the two sets of data. Does this mean the model can help us understand the phenomenon? # + [markdown] colab_type="text" id="-Esgpp-5GlJY" # #Micro-tutorial 9 - testing and evaluating the model # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="a6GK1G7qbmg1" outputId="da5fdb52-d75e-437a-f41a-74d8e3473f7d" #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='5vnDOxN3M_k', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="0ujFZwu6la19" # # **Goal:** Once we have finished the model, we need a description of how good it is. The question and goals we set in micro-tutorial 1 and 4 help here. There are multiple ways to evaluate a model. Aside from the obvious fact that we want to get insight into the phenomenon that is not directly accessible without the model, we always want to quantify how well the model agrees with the data. # # + [markdown] colab_type="text" id="K6nzpSVyWUJK" # ### Quantify model quality with $R^2$ # # Let's look at how well our model matches the actual judgment data. # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 354} colab_type="code" id="PYgVPkSVla1-" outputId="31cd6d1d-c19a-446d-f324-5dd29caee45c" #@title Run to plot predictions over data my_plot_predictions_data(judgments, predictions) # + [markdown] colab_type="text" id="CznDTrrRDiME" # When model predictions are correct, the red points in the figure above should lie along the identity line (a dotted black line here). Points off the identity line represent model prediction errors. 
While in each plot we see two clusters of dots that are fairly close to the identity line, there are also two clusters that are not. For the trials that those points represent, the model has an illusion while the participants don't or vice versa. # # We will use a straightforward, quantitative measure of how good the model is: $R^2$ (pronounced: "R-squared"), which can take values between 0 and 1, and expresses how much variance is explained by the relationship between two variables (here the model's predictions and the actual judgments). It is also called [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), and is calculated here as the square of the correlation coefficient (r or $\rho$). Just run the chunk below: # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="efPMYHLVC0XN" outputId="6a795545-07cf-492f-e132-904b5143a483" #@title Run to calculate R^2 conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R^2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R^2: %0.3f'%( r_value**2 )) # + [markdown] colab_type="text" id="l2naIlvUW09i" # These $R^2$s express how well the experimental conditions explain the participants judgments and how well the models predicted judgments explain the participants judgments. # # You will learn much more about model fitting, quantitative model evaluation and model comparison tomorrow! # # Perhaps the $R^2$ values don't seem very impressive, but the judgments produced by the participants are explained by the model's predictions better than by the actual conditions. 
In other words: the model tends to have the same illusions as the participants. # + [markdown] colab_type="text" id="DEWF_i_6H2IM" # ### **TD 9.1** Varying the threshold parameter to improve the model # # In the code below, see if you can find a better value for the threshold parameter, to reduce errors in the models' predictions. # # **Testing thresholds** # + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wIdjtHtcH1ph" outputId="2aebef2f-f555-4d7f-8b89-7a26bf48bf8e" # Testing thresholds def test_threshold(threshold=0.33): # prepare to run model data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':threshold, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # get predictions in matrix predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 # get percepts from participants and model conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # calculate R2 slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) test_threshold(threshold=0.5) # + [markdown] colab_type="text" id="G3qg0wEJyxkq" # ### **TD 9.2:** Credit assigmnent of self motion # # When we look at the figure in **TD 8.1**, we can see a cluster does seem very close to (1,0), just like in the actual data. The cluster of points at (1,0) are from the case where we conclude there is no self motion, and then set the self motion to 0. That value of 0 removes a lot of noise from the world-motion estimates, and all noise from the self-motion estimate. 
In the other case, where there is self motion, we still have a lot of noise (see also micro-tutorial 4). # # Let's change our `my_selfmotion()` function to return a self motion of 1 when the vestibular signal indicates we are above threshold, and 0 when we are below threshold. Edit the function here. # # **Template function for credit assigment of self motion** # # + cellView="both" colab={} colab_type="code" id="2D9wLipTy4F2" # Template binary self-motion estimates def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' # integrate signal: ves = np.cumsum(ves*(1/params['samplingrate'])) # use running window to accumulate evidence: selfmotion = my_moving_window(ves, window=params['filterwindows'][0], FUN=params['FUN']) ## take the final value as our estimate: selfmotion = selfmotion[-1] ########################################## # this last part will have to be changed # compare to threshold, set to 0 if lower and else... if selfmotion < params['threshold']: selfmotion = 0 #uncomment the lines below and fill in with your code #else: #YOUR CODE HERE # comment this out when you've filled raise NotImplementedError("Student excercise: modify with credit assignment") return selfmotion # + [markdown] cellView="both" colab={} colab_type="text" id="CyCeKL7mGACI" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_90571e21.py) # # # + [markdown] colab_type="text" id="l8HXAESh13Oc" # The function you just wrote will be used when we run the model again below. 
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" id="hSs7bTFD153A" outputId="dcbfc3c5-60a0-455b-873d-6fec1bd6256e" #@title Run model credit assigment of self motion # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.33, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # no process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=False) # + [markdown] colab_type="text" id="dddNTNQ8GpfT" # That looks much better, and closer to the actual data. Let's see if the $R^2$ values have improved: # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 388} colab_type="code" id="CdzQtFWOGzly" outputId="ed596b33-4d9e-4eaa-ef8b-b8e7dcd869e8" #@title Run to calculate R^2 for model with self motion credit assignment conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) my_plot_predictions_data(judgments, predictions) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(velpredict,veljudgmnt) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) # + [markdown] colab_type="text" id="ynx87xTs17W3" # While the model still predicts velocity judgments better than the conditions (i.e. the model predicts illusions in somewhat similar cases), the $R^2$ values are actually worse than those of the simpler model. 
What's really going on is that the same set of points that were model prediction errors in the previous model are also errors here. All we have done is reduce the spread. # + [markdown] colab_type="text" id="iQniQk3Dyq-c" # ### Interpret the model's meaning # # Here's what you should have learned: # # 1. A noisy, vestibular, acceleration signal can give rise to illusory motion. # 2. However, disambiguating the optic flow by adding the vestibular signal simply adds a lot of noise. This is not a plausible thing for the brain to do. # 3. Our other hypothesis - credit assignment - is more qualitatively correct, but our simulations were not able to match the frequency of the illusion on a trial-by-trial basis. # # _It's always possible to refine our models to improve the fits._ # # There are many ways to try to do this. A few examples; we could implement a full sensory cue integration model, perhaps with Kalman filters (Week 2, Day 3), or we could add prior knowledge (at what time do the trains depart?). However, we decided that for now we have learned enough, so it's time to write it up. # # + [markdown] colab_type="text" id="THERtRx9Gn7s" # # Micro-tutorial 10 - publishing the model # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="2HD5Ve1GFDRG" outputId="678acdc9-893e-4bd8-cb05-c9a0f3529919" #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='kf4aauCr5vA', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="suhFBllXULWb" # # **Goal:** In order for our model to impact the field, it needs to be accepted by our peers, and order for that to happen it matters how the model is published. # + [markdown] colab_type="text" id="WfFajjVDla2E" # ### **TD 10.1:** Write a summary of the project # # Here we will write up our model, by answering the following questions: # * **What is the phenomena**? 
Here summarize the part of the phenomena which your model addresses. # * **What is the key scientific question?**: Clearly articulate the question which your model tries to answer. # * **What was our hypothesis?**: Explain the key relationships which we relied on to simulate the phenomena. # * **How did your model work?** Give an overview of the model, it's main components, and how the model works. ''Here we ... '' # * **What did we find? Did the model work?** Explain the key outcomes of your model evaluation. # * **What can we conclude?** Conclude as much as you can _with reference to the hypothesis_, within the limits of the model. # * **What did you learn? What is left to be learned?** Briefly argue the plausibility of the approach and what you think is _essential_ that may have been left out. # # ### Guidance for the future # There are good guidelines for structuring and writing an effective paper (e.g. [Mensh & Kording, 2017](https://doi.org/10.1371/journal.pcbi.1005619)), all of which apply to papers about models. There are some extra considerations when publishing a model. In general, you should explain each of the steps in the paper: # # **Introduction:** Steps 1 & 2 (maybe 3) # # **Methods:** Steps 3-7, 9 # # **Results:** Steps 8 & 9, going back to 1, 2 & 4 # # In addition, you should provide a visualization of the model, and upload the code implementing the model and the data it was trained and tested on to a repository (e.g. GitHub and OSF). # # The audience for all of this should be experimentalists, as they are the ones who can test predictions made by your your model and collect new data. This way your models can impact future experiments, and that future data can then be modeled (see modeling process schematic below). Remember your audience - it is _always_ hard to clearly convey the main points of your work to others, especially if your audience doesn't necessarily create computational models themselves. 
# # ![how-to-model process from Blohm et al 2019](https://deniseh.lab.yorku.ca/files/2020/06/HowToModel-ENEURO.0352-19.2019.full_.pdf.png) # # ### Suggestion # # For every modeling project, a very good exercise in this is to _**first**_ write a short, 100-word abstract of the project plan and expected impact, like the summary you wrote. This forces focussing on the main points: describing the relevance, question, model, answer and what it all means very succinctly. This allows you to decide to do this project or not **before you commit time writing code for no good purpose**. Notice that this is really what we've walked you through carefully in this tutorial! :) # # + [markdown] colab_type="text" id="01n_LLDZ2ZWB" # # Post-script # # Note that the model we built here was extremely simple and used artificial data on purpose. It allowed us to go through all the steps of building a model, and hopefully you noticed that it is not always a linear process, you will go back to different steps if you hit a roadblock somewhere. # # However, if you're interested in how to actually approach modeling a similar phenomenon in a probabilistic way, we encourage you to read the paper by [Dokka et. al., 2019](https://doi.org/10.1073/pnas.1820373116), where the authors model how judgments of heading direction are influenced by objects that are also moving. # + [markdown] colab_type="text" id="41UU4oWvRmo6" # # Reading # # <NAME>, <NAME>, <NAME> (2020). _A How-to-Model Guide for Neuroscience_ eNeuro, 7(1) ENEURO.0352-19.2019. https://doi.org/10.1523/ENEURO.0352-19.2019 # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2019). _Causal inference accounts for heading perception in the presence of object motion._ PNAS, 116(18):9060-9065. https://doi.org/10.1073/pnas.1820373116 # # <NAME>, <NAME>, <NAME>, Angelaki DE, <NAME> (2014). _Optimal Multisensory Decision-Making in a Reaction-Time Task._ eLife, 3:e03005. 
https://doi.org/10.7554/eLife.03005 # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2014). _Direction detection thresholds of passive self-motion in artistic gymnasts._ Exp Brain Res, 232:1249–1258. https://doi.org/10.1007/s00221-014-3841-0 # # <NAME>, <NAME> (2017). _Ten simple rules for structuring papers._ PLoS Comput Biol 13(9): e1005619. https://doi.org/10.1371/journal.pcbi.1005619 # # <NAME>, <NAME> (2012). _Stimulus Meanings Alter Illusory Self-Motion (Vection) - Experimental Examination of the Train Illusion._ Seeing Perceiving, 25(6):631-45. https://doi.org/10.1163/18784763-00002394 #
tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Titania = <NAME> # On Bumble, the Queen of Fairies and the Queen of Bees got together to find some other queens. # # * Given # * Queen of Fairies # * Queen of Bees # * Solutions # * C [Ellery Queen](https://en.wikipedia.org/wiki/Ellery_Queen) = TDDTNW M UPZTDO # * L Queen of Hearts = THE L OF HEARTS # * E Queen Elizabeth = E ELIZABETH II # * R Steve McQueen = STEVE MC R MOVIES # * K Queen Latifah = K LATIFAH ALBUMS # * meta # # ``` # C/M L/O # E/T R/E # K/L # ``` # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" LIT NPGRU IRL GWOLTNW LIT ENTTJ MPVVFU GWOLTNW LIT TEWYLFRU MNPOO GWOLTNW LIT OFRGTOT LCFU GWOLTNW LIT PNFEFU PV TZFD """, hint="cryptogram", threshold=1) # LIT NPGRU IRL GWOLTNW # THE ROMAN HAT MYSTERY # LIT ENTTJ MPVVFU GWOLTNW # THE GREEK COFFIN MYSTERY # LIT TEWYLFRU MNPOO GWOLTNW # THE EGYPTIAN CROSS MYSTERY # LIT OFRGTOT LCFU GWOLTNW # THE SIAMESE TWIN MYSTERY # LIT PNFEFU PV TZFD # THE ORIGIN OF EVIL # TDDTNW M UPZTDO # ELLERY C NOVELS # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" KQLECDP NDWSDNLSI ZOMXFUSLDI LZZ BFPN PNDFQ NDMWI YOMRFUS KMQW """, hint="cryptogram") # Queen of Hearts # THELOFHEARTS # PNDOLZNDMQPI # CROQUET # KQLECDP # HEDGEHOGS # NDWSDNLSI # FLAMINGOES # ZOMXFUSLDI # OFF WITH THEIR HEADS # LZZ BFPN PNDFQ NDMWI # BLAZING CARD # YOMRFUS KMQW # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" ZOXMNRBFGP DGQGXT XYIBNK DINRXT XFGIQTK QYRBTKL ITNBRNRB PYRGIXF YXTGR QNRTI """, hint="cryptogram") # TQN?GZTLF # Queen Elizabeth # EELIZABETHII # # BUCKINGHAM PALACE # ZOXMNRBFGP DGQGXT # CORGIS # XYIBNK # <NAME> # DINRXT XFGIQTK # LONGEST-REIGNING MONARCH # QYRBTKL ITNBRNRB PYRGIXF # OCEAN LINER # 
YXTGR QNRTI # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" LUF ZTYSWDWMFSL VFQFS LUF YEFTL FVMTRF LUF LPXFEWSY WSDFESP RTRWJJPS LUF MWSMWSSTLW OWC """, hint="cryptogram", threshold=1) # <NAME> # STEVEMCRMOVIES # VLFQFZMEZPQWFV # THE MAGNIFICENT SEVEN # LUF ZTYSWDWMFSL VFQFS # THE GREAT ESCAPE # LUF YEFTL FVMTRF # THE TOWERING INFERNO # LUF LPXFEWSY WSDFESP # PAPILLON # RTRWJJPS # THE CINCINNATI KID # LUF MWSMWSSTLW OWC # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" HZRWPO FY Z BDBRZ IQZVL PODTH FPGOP DH RNO VFWPR RNO GZHZ FXOHB ZQIWU SOPBFHZ """, hint="cryptogram", threshold=1) # Queen Latifah # LQZRDYZNZQIWUB # KLATIFAHALBUMS # NATURE OF A SISTA # HZRWPO FY Z BDBRZ # BLACK REIGN # IQZVL PODTH # ORDER IN THE COURT # FPGOP DH RNO VFWPR # THE DANA OVENS ALBUM # RNO GZHZ FXOHB ZQIWU # PERSONA # SOPBFHZ # + import forge from puzzle.puzzlepedia import puzzlepedia puzzle = puzzlepedia.parse(""" LQZRDYZNZQIWUB PNDOLZNDMQPI TDDTNWMUPZTDO TTQNJGZTLFNN VLFQFZMEZPQWFV """, hint="cryptogram") ################ # LQZRDYZNZQIWUB # KLATIFAHALBUMS = K / L ################ # PNDOLZNDMQPI # THELOFHEARTS = L / O ################ # TDDTNWMUPZTDO # ELLERYCNOVELS = C / M ################ # TTQNJGZTLFNN # EELIZABETHII = E / T ################ # VLFQFZMEZPQWFV # STEVEMCRMOVIES = R / E ################
src/puzzle/examples/puzzle_boat/4_warmup/titania.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="B48YJv5c6SNC" colab_type="code" colab={}
# %%capture
# !pip install --upgrade pip
# !pip install cirq==0.7

# + [markdown] id="kL2C06ln6h48" colab_type="text"
# # Rabi Oscillation Experiment
#
# In this experiment, you are going to use Cirq to check that rotating a qubit by an increasing angle, and then measuring the qubit, produces Rabi oscillations. This requires you to do the following things:
#
# 1. Prepare the $|0\rangle$ state.
# 2. Rotate by an angle $\theta$ around the $X$ axis.
# 3. Measure to see if the result is a 1 or a 0.
# 4. Repeat steps 1-3 $k$ times.
# 5. Report the fraction of $\frac{\text{Number of 1's}}{k}$ found in step 3.

# + [markdown] id="ACqqV6tJ7xXp" colab_type="text"
# ## 1. Getting to know Cirq
#
# Cirq emphasizes the details of implementing quantum algorithms on near term devices.
# For example, when you work on a qubit in Cirq you don't operate on an unspecified qubit that will later be mapped onto a device by a hidden step.
# Instead, you are always operating on specific qubits at specific locations that you specify.
#
# Suppose you are working with a 54 qubit Sycamore chip.
# This device is included in Cirq by default.
# It is called `cirq.google.Sycamore`, and you can see its layout by printing it.

# + id="rKoMKEw46XY7" colab_type="code" colab={}
import cirq

working_device = cirq.google.Sycamore
print(working_device)

# + [markdown] id="FJJEbuk-98Gj" colab_type="text"
# For this experiment you only need one qubit and you can just pick whichever one you like.

# + id="XoXekxuQ8bI0" colab_type="code" colab={}
my_qubit = cirq.GridQubit(5, 6)

# + [markdown] id="8Tucm7os-uET" colab_type="text"
# Once you've chosen your qubit you can build circuits that use it.

# + id="niH8sty--Hu0" colab_type="code" colab={}
from cirq.contrib.svg import SVGCircuit

# Build a two-operation circuit: an X-axis rotation followed by a measurement.
# (FIX: the old comment claimed the circuit contained X, Ry(pi/2) and H,
# which never matched the operations below.)
my_circuit = cirq.Circuit(
    # Rotate the qubit pi/2 radians around the X axis (3.141 approximates pi).
    cirq.rx(3.141 / 2).on(my_qubit),
    # Measure the qubit, labelling the result 'out'.
    cirq.measure(my_qubit, key='out')
)
SVGCircuit(my_circuit)

# + [markdown] id="-zbI-2KUMU66" colab_type="text"
# Now you can simulate sampling from your circuit using `cirq.Simulator`.

# + id="IqUn4uv9_IVo" colab_type="code" colab={}
sim = cirq.Simulator()
samples = sim.sample(my_circuit, repetitions=10)
samples

# + [markdown] id="k-uAT6sHdGib" colab_type="text"
# You can also get properties of the circuit, such as the density matrix of the circuit's output or the wavefunction just before the terminal measurement.

# + id="83OqpReyHyUK" colab_type="code" colab={}
# FIX: renamed the misspelled local `wavefuntion_before_measurement` and
# dropped the pointless f-prefix on the placeholder-free strings below.
# `my_circuit[:-1]` slices off the final (measurement) moment.
wavefunction_before_measurement = sim.simulate(my_circuit[:-1])
sampled_wavefunction_after_measurement = sim.simulate(my_circuit)

print('State before measurement:')
print(wavefunction_before_measurement)
print('State after measurement:')
print(sampled_wavefunction_after_measurement)

# + [markdown] id="1raIf8dsWHLJ" colab_type="text"
# You can also examine the outputs from a noisy environment.
# For example, an environment where 10% depolarization is applied to each qubit after each operation in the circuit:

# + id="P7VW97ugWE_h" colab_type="code" colab={}
noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.1))
noisy_post_measurement_state = noisy_sim.simulate(my_circuit)
noisy_pre_measurement_state = noisy_sim.simulate(my_circuit[:-1])

print('Noisy state after measurement:' + str(noisy_post_measurement_state))
print('Noisy state before measurement:' + str(noisy_pre_measurement_state))

# + [markdown] id="2h6yoOl4Rmwt" colab_type="text"
# # 2. Parameterized Circuits and Sweeps
#
# Now that you have some of the basics end to end, you can create a parameterized circuit that rotates by an angle $\theta$:

# + id="n6h6yuyGM58s" colab_type="code" colab={}
import sympy

theta = sympy.Symbol('theta')

parameterized_circuit = cirq.Circuit(
    cirq.rx(theta).on(my_qubit),
    cirq.measure(my_qubit, key='out')
)
SVGCircuit(parameterized_circuit)

# + [markdown] id="rU3BBOp0S4sM" colab_type="text"
# In the above block you saw that there is a `sympy.Symbol` that you placed in the circuit. Cirq supports symbolic computation involving circuits. What this means is that when you construct `cirq.Circuit` objects you can put placeholders in many of the classical control parameters of the circuit which you can fill with values later on.
#
# Now if you wanted to use `cirq.simulate` or `cirq.sample` with the parameterized circuit you would also need to specify a value for `theta`.

# + id="SMdz-yAZSwrU" colab_type="code" colab={}
samples_at_theta_equals_2 = sim.sample(
    parameterized_circuit,
    params={theta: 2},
    repetitions=10)
samples_at_theta_equals_2

# + [markdown] id="H_H13Hc8g873" colab_type="text"
# You can also specify *multiple* values of `theta`, and get samples back for each value.

# + id="0zjZxGY6hIsu" colab_type="code" colab={}
samples_at_multiple_theta = sim.sample(
    parameterized_circuit,
    params=[{theta: 0.5}, {theta: 3.141}],
    repetitions=10)
samples_at_multiple_theta

# + [markdown] id="juuWvOEphaaE" colab_type="text"
# Cirq has shorthand notation you can use to sweep `theta` over a range of values.

# + id="8lCb3049hqXn" colab_type="code" colab={}
samples_at_swept_theta = sim.sample(
    parameterized_circuit,
    params=cirq.Linspace(theta, start=0, stop=3.14159, length=5),
    repetitions=5)
samples_at_swept_theta

# + [markdown] id="wqaORMoKiAIW" colab_type="text"
# The result value being returned by `sim.sample` is a `pandas.DataFrame` object.
# Pandas is a common library for working with table data in python.
# You can use standard pandas methods to analyze and summarize your results.

# + id="bLzGV8nFiS9o" colab_type="code" colab={}
import pandas

big_results = sim.sample(
    parameterized_circuit,
    params=cirq.Linspace(theta, start=0, stop=3.14159, length=20),
    repetitions=10_000)

# big_results is too big to look at. Plot cross tabulated data instead.
pandas.crosstab(big_results.theta, big_results.out).plot()

# + [markdown] id="b2TkL28AmBSQ" colab_type="text"
# # 3. The built-in experiment
#
# Cirq comes with a pre-written Rabi oscillation experiment `cirq.experiments.rabi_oscillations`.
# This method takes a `cirq.Sampler`, which could be a simulator or a network connection to real hardware.
# The method takes a few more experimental parameters, and returns a result object
# that can be plotted.

# + id="ma0pVZwSThQx" colab_type="code" colab={}
# FIX: removed an unused `import datetime` that sat at the top of this cell.
result = cirq.experiments.rabi_oscillations(
    sampler=noisy_sim,
    qubit=my_qubit,
    num_points=50,
    repetitions=10000)
result.plot()

# + [markdown] id="U-oezaJAnzJ8" colab_type="text"
# Notice that you can tell from the plot that you used the noisy simulator you defined earlier.
# You can also tell that the amount of depolarization is roughly 10%.

# + [markdown] id="V6uE-yFxoT-3" colab_type="text"
# # 4. Exercise: Find the best qubit
#
# As you have seen, you can use Cirq to perform a Rabi oscillation experiment.
# You can either make the experiment yourself out of the basic pieces made available by Cirq, or use the prebuilt experiment method.
#
# Now you're going to put this knowledge to the test.
#
# There is some amount of depolarizing noise on each qubit.
# Your goal is to characterize every qubit from the Sycamore chip using a Rabi oscillation experiment, and find the qubit with the lowest noise according to the secret noise model.

# + id="-eISq1eqXYWx" colab_type="code" colab={}
import hashlib


class SecretNoiseModel(cirq.NoiseModel):
    """Noise model whose per-qubit depolarization strength is derived from a
    hash of the qubit's name, so the 'best' qubit cannot be read off the code."""

    def noisy_operation(self, op):
        # Hey! No peeking!
        q = op.qubits[0]
        # First byte of the SHA-256 of the qubit's string repr, scaled to [0, 1).
        v = hashlib.sha256(str(q).encode()).digest()[0] / 256
        yield cirq.depolarize(v).on(q)
        yield op


secret_noise_sampler = cirq.DensityMatrixSimulator(noise=SecretNoiseModel())

# + [markdown] id="rijcdXfFtaN0" colab_type="text"
#

# + id="Rvf87Wqrp-lu" colab_type="code" colab={}
q = cirq.google.Sycamore.qubits[3]
print('qubit', repr(q))
cirq.experiments.rabi_oscillations(
    sampler=secret_noise_sampler,
    qubit=q
).plot()

# + id="-P6bCx4dvM0z" colab_type="code" colab={}
examples/Rabi_Demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from os import listdir
from os.path import isfile, join, getsize
import glob
import time
import random
from multiprocessing import Pool
import multiprocessing as mp
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
pd.set_option('display.float_format', lambda x: '%.3f' % x)
import pickle as pkl
import warnings
np.random.seed(seed=1991)
import tables  # backend for pd.HDFStore
from itertools import chain
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES

# Shared spaCy lemmatizer; workers inherit it via fork.
lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)

# +
# UK -> US spelling normalisation table and pre-computed vocabularies.
br_to_us = pd.read_excel("Book.xlsx", header=1)
br_to_us_dict = dict(zip(br_to_us.UK.tolist(), br_to_us.US.tolist()))
contextwords = pkl.load(open("contexts.pkl", "rb"))
words_list = pkl.load(open('words_list.pkl', 'rb'))
batched_pkl_files = pkl.load(open('batched_pkl_files.pkl', 'rb'))
# Apply the same spelling map to every token-bearing column.
spelling_replacement = {'context': br_to_us_dict, 'modifier': br_to_us_dict,
                        'head': br_to_us_dict, 'word': br_to_us_dict}
# -

# POS-tagged token patterns used to shape five-gram regexes.
any_word = r'.+_.+'
any_noun = r'.+_noun'
# NOTE(review): despite the name this matches lowercase nouns, i.e. it
# EXCLUDES capitalised proper nouns — confirm the intended semantics.
proper_noun = r'[a-z.-]+_noun'
space = r'\s'


def lemma_maker(x, y):
    """Return the first lemma spaCy proposes for token `x` with POS `y`."""
    return lemmatizer(x, y)[0]


def relemjoin(df, col_name):
    """Lemmatise the token half of a 'token_pos' column as a noun.

    Derives a new column named after the prefix of `col_name` (e.g.
    'word_pos' -> 'word'), lemmatises it, applies UK->US spelling
    normalisation and re-attaches the '_noun' tag. Mutates and returns df.
    """
    new_col = col_name.split('_')[0]
    # NOTE(review): this builds e.g. 'w_pos'/'m_pos' from the FIRST character;
    # it only carries the constant "noun" into lemma_maker, so behaviour is
    # unaffected, but the single-letter name looks unintended — confirm.
    new_col_pos = new_col[0] + "_pos"
    df[new_col] = df[col_name].str.split('_', 1).str[0]
    df[new_col_pos] = "noun"
    df[new_col] = np.vectorize(lemma_maker)(df[new_col], df[new_col_pos])
    df.replace(spelling_replacement, inplace=True)
    df[new_col] = df[new_col] + "_noun"
    return df


def syntactic_reducer(df, align, level=None):
    """Filter five-grams by POS shape and melt them into target/context rows.

    `align` selects where the target sits in the five-gram: "right" = first
    token, "mid1"/"mid2"/"mid3" = shifted right one position at a time,
    "left" = last token(s).

    With level == "word" the target is a single noun and one melted frame of
    (word, year, count, context) rows is returned. Otherwise the target is a
    noun-noun pair and four frames are returned: (phrases, compounds,
    modifiers, heads), where `compounds` is the subset of `phrases` that is
    exactly a two-noun, lowercase compound (no third noun extending it).
    """
    if len(df) == 0:
        # FIX: phrase-level callers unpack four frames; the original returned
        # the single empty frame here, which would raise at the call site.
        # (Also removed a stray debug print.)
        if level == "word" or align == "mid3":
            return df
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()

    if align == "right":
        if level == "word":
            # noun + 4 tokens; only the 3 adjacent right tokens form the window
            df = df.loc[df.fivegram_pos.str.match(r"^" + any_noun + space + (any_word + space) * 3 + any_word + "$")]
            if len(df) == 0:
                return df
            df['word_pos'], df['r1_pos'], df['r2_pos'], df['r3_pos'], _ = df['fivegram_pos'].str.split(space).str
            df = relemjoin(df, 'word_pos')
            df = pd.melt(df, id_vars=['word', 'year', 'count'], value_vars=['r1_pos', 'r2_pos', 'r3_pos'])
            return df
        else:
            phrases = df.loc[df.fivegram_pos.str.match(r'^' + (any_noun + space) * 2 + (any_word + space) * 2 + any_word + '$')]
            # Compounds: drop noun-noun-noun prefixes and keep lowercase nouns only.
            cdsm = phrases.loc[~phrases.fivegram_pos.str.match(r'^' + (any_noun + space) * 3 + (any_word + space) + any_word + '$')]
            cdsm = cdsm.loc[cdsm.fivegram_pos.str.match(r'^' + (proper_noun + space) * 2 + (any_word + space) * 2 + any_word + '$')]
            try:
                phrases['modifier_pos'], phrases['head_pos'], phrases['r1_pos'], phrases['r2_pos'], phrases['r3_pos'] = phrases['fivegram_pos'].str.split(space).str
                cdsm['modifier_pos'], cdsm['head_pos'], cdsm['r1_pos'], cdsm['r2_pos'], cdsm['r3_pos'] = cdsm['fivegram_pos'].str.split(space).str
            except ValueError:
                # .str unpacking raises on an empty frame.
                return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
            phrases = relemjoin(phrases, 'modifier_pos')
            phrases = relemjoin(phrases, 'head_pos')
            cdsm = relemjoin(cdsm, 'modifier_pos')
            cdsm = relemjoin(cdsm, 'head_pos')
            phrases = pd.melt(phrases, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['r1_pos', 'r2_pos', 'r3_pos'])
            compounds = pd.melt(cdsm, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['r1_pos', 'r2_pos', 'r3_pos'])
            modifiers = pd.melt(cdsm, id_vars=['modifier', 'year', 'count'], value_vars=['head', 'r1_pos', 'r2_pos'])
            heads = pd.melt(cdsm, id_vars=['head', 'year', 'count'], value_vars=['modifier', 'r1_pos', 'r2_pos', 'r3_pos'])
            return phrases, compounds, modifiers, heads

    elif align == "mid1":
        if level == "word":
            df = df.loc[df.fivegram_pos.str.match(r"^" + any_word + space + any_noun + space + (any_word + space) * 2 + any_word + "$")]
            if len(df) == 0:
                return df
            df['l1_pos'], df['word_pos'], df['r1_pos'], df['r2_pos'], df['r3_pos'] = df['fivegram_pos'].str.split(space).str
            df = relemjoin(df, 'word_pos')
            df = pd.melt(df, id_vars=['word', 'year', 'count'], value_vars=['l1_pos', 'r1_pos', 'r2_pos', 'r3_pos'])
            return df
        else:
            phrases = df.loc[df.fivegram_pos.str.match(r'^' + any_word + space + (any_noun + space) * 2 + any_word + space + any_word + '$')]
            cdsm = phrases.loc[~phrases.fivegram_pos.str.match(r'^' + (any_noun + space) * 4 + any_word + '$')]
            cdsm = cdsm.loc[cdsm.fivegram_pos.str.match(r'^' + any_word + space + (proper_noun + space) * 2 + any_word + space + any_word + '$')]
            try:
                phrases['l1_pos'], phrases['modifier_pos'], phrases['head_pos'], phrases['r1_pos'], phrases['r2_pos'] = phrases['fivegram_pos'].str.split(space).str
                cdsm['l1_pos'], cdsm['modifier_pos'], cdsm['head_pos'], cdsm['r1_pos'], cdsm['r2_pos'] = cdsm['fivegram_pos'].str.split(space).str
            except ValueError:
                return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
            phrases = relemjoin(phrases, 'modifier_pos')
            phrases = relemjoin(phrases, 'head_pos')
            cdsm = relemjoin(cdsm, 'modifier_pos')
            cdsm = relemjoin(cdsm, 'head_pos')
            phrases = pd.melt(phrases, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'r1_pos', 'r2_pos'])
            compounds = pd.melt(cdsm, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'r1_pos', 'r2_pos'])
            modifiers = pd.melt(cdsm, id_vars=['modifier', 'year', 'count'], value_vars=['head', 'l1_pos', 'r1_pos', 'r2_pos'])
            heads = pd.melt(cdsm, id_vars=['head', 'year', 'count'], value_vars=['modifier', 'l1_pos', 'r1_pos', 'r2_pos'])
            return phrases, compounds, modifiers, heads

    elif align == "mid2":
        if level == "word":
            df = df.loc[df.fivegram_pos.str.match(r'^' + (any_word + space) * 2 + any_noun + space + any_word + space + any_word + '$')]
            if len(df) == 0:
                return df
            df['l1_pos'], df['l2_pos'], df['word_pos'], df['r1_pos'], df['r2_pos'] = df['fivegram_pos'].str.split(space).str
            df = relemjoin(df, 'word_pos')
            df = pd.melt(df, id_vars=['word', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'r1_pos', 'r2_pos'])
            return df
        else:
            phrases = df.loc[df.fivegram_pos.str.match(r'^' + (any_word + space) * 2 + (any_noun + space) * 2 + any_word + '$')]
            cdsm = phrases.loc[~phrases.fivegram_pos.str.match(r'^' + any_word + space + (any_noun + space) * 3 + any_word + '$')]
            cdsm = cdsm.loc[cdsm.fivegram_pos.str.match(r'^' + (any_word + space) * 2 + (proper_noun + space) * 2 + any_word + '$')]
            try:
                phrases['l1_pos'], phrases['l2_pos'], phrases['modifier_pos'], phrases['head_pos'], phrases['r1_pos'] = phrases['fivegram_pos'].str.split(space).str
                cdsm['l1_pos'], cdsm['l2_pos'], cdsm['modifier_pos'], cdsm['head_pos'], cdsm['r1_pos'] = cdsm['fivegram_pos'].str.split(space).str
            except ValueError:
                return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
            phrases = relemjoin(phrases, 'modifier_pos')
            phrases = relemjoin(phrases, 'head_pos')
            cdsm = relemjoin(cdsm, 'modifier_pos')
            cdsm = relemjoin(cdsm, 'head_pos')
            phrases = pd.melt(phrases, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'r1_pos'])
            compounds = pd.melt(cdsm, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'r1_pos'])
            modifiers = pd.melt(cdsm, id_vars=['modifier', 'year', 'count'], value_vars=['head', 'l1_pos', 'l2_pos', 'r1_pos'])
            heads = pd.melt(cdsm, id_vars=['head', 'year', 'count'], value_vars=['modifier', 'l1_pos', 'l2_pos', 'r1_pos'])
            return phrases, compounds, modifiers, heads

    elif align == "mid3":
        # Word-level only; the phrase pipeline never requests this alignment.
        df = df.loc[df.fivegram_pos.str.match(r'^' + (any_word + space) * 3 + any_noun + space + any_word + '$')]
        if len(df) == 0:
            return df
        # FIX: the matched noun is the 4th token. The original unpacked the
        # tokens as (l1, l2, word, r1, r2), so 'word_pos' received the 3rd,
        # arbitrary token and the melted context columns were wrong.
        df['l1_pos'], df['l2_pos'], df['l3_pos'], df['word_pos'], df['r1_pos'] = df['fivegram_pos'].str.split(space).str
        df = relemjoin(df, 'word_pos')
        df = pd.melt(df, id_vars=['word', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'l3_pos', 'r1_pos'])
        return df

    elif align == "left":
        if level == "word":
            df = df.loc[df.fivegram_pos.str.match(r'^' + (any_word + space) * 4 + any_noun + '$')]
            if len(df) == 0:
                return df
            _, df['l1_pos'], df['l2_pos'], df['l3_pos'], df['word_pos'] = df['fivegram_pos'].str.split(space).str
            df = relemjoin(df, 'word_pos')
            df = pd.melt(df, id_vars=['word', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'l3_pos'])
            return df
        else:
            phrases = df.loc[df.fivegram_pos.str.match(r'^' + (any_word + space) * 3 + any_noun + space + any_noun + '$')]
            cdsm = phrases.loc[~phrases.fivegram_pos.str.match(r'^' + (any_word + space) * 2 + (any_noun + space) * 2 + any_noun + '$')]
            cdsm = cdsm.loc[cdsm.fivegram_pos.str.match(r'^' + (any_word + space) * 3 + proper_noun + space + proper_noun + '$')]
            try:
                phrases['l1_pos'], phrases['l2_pos'], phrases['l3_pos'], phrases['modifier_pos'], phrases['head_pos'] = phrases['fivegram_pos'].str.split(space).str
                cdsm['l1_pos'], cdsm['l2_pos'], cdsm['l3_pos'], cdsm['modifier_pos'], cdsm['head_pos'] = cdsm['fivegram_pos'].str.split(space).str
            except ValueError:
                return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
            phrases = relemjoin(phrases, 'modifier_pos')
            phrases = relemjoin(phrases, 'head_pos')
            cdsm = relemjoin(cdsm, 'modifier_pos')
            cdsm = relemjoin(cdsm, 'head_pos')
            phrases = pd.melt(phrases, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'l3_pos'])
            compounds = pd.melt(cdsm, id_vars=['modifier', 'head', 'year', 'count'], value_vars=['l1_pos', 'l2_pos', 'l3_pos'])
            modifiers = pd.melt(cdsm, id_vars=['modifier', 'year', 'count'], value_vars=['head', 'l1_pos', 'l2_pos', 'l3_pos'])
            heads = pd.melt(cdsm, id_vars=['head', 'year', 'count'], value_vars=['modifier', 'l1_pos', 'l2_pos', 'l3_pos'])
            return phrases, compounds, modifiers, heads


def context_reducer(df):
    """Keep melted context tokens with a content POS, lemmatise them and
    restrict them to the pre-loaded `contextwords` vocabulary."""
    if len(df) == 0:
        return df
    df["variable"] = df["variable"].str.replace(r"_pos", "")
    df["context"], df["context_pos"] = df['value'].str.split('_', 1).str
    df = df.loc[df.context_pos.isin(["noun", "adj", "adv", "verb"])]
    if len(df) == 0:
        return df
    df['context'] = np.vectorize(lemma_maker)(df['context'], df['context_pos'])
    df.replace(spelling_replacement, inplace=True)
    df['context'] = df['context'] + "_" + df['context_pos']
    df.query('context in @contextwords', inplace=True)
    return df


def _aggregate(df, keys):
    """Drop NaNs, sum counts per `keys`, and normalise the year dtype."""
    df = df.dropna()
    df = df.groupby(keys)['count'].sum().to_frame()
    df.reset_index(inplace=True)
    df.year = df.year.astype("int32")
    return df


def cdsm_word_reducer(df):
    """Word-context pipeline: run every alignment, pool, filter, aggregate."""
    grams = []
    for align in ("right", "mid1", "mid2", "mid3", "left"):
        gram = syntactic_reducer(df, align=align, level="word")
        grams.append(context_reducer(gram))
    words_df = pd.concat(grams, ignore_index=True, sort=False)
    words_df = words_df.dropna()
    words_df = words_df.query('word in @words_list')
    return _aggregate(words_df, ['word', 'context', 'year'])


def cdsm_reducer(df):
    """Phrase/compound pipeline: run every alignment and pool the results.

    Returns (compounds, modifiers, heads, phrases), each aggregated by its
    identifying keys plus context and year.
    """
    phrase_parts, compound_parts, modifier_parts, head_parts = [], [], [], []
    for align in ("right", "mid1", "mid2", "left"):
        phr, comp, mod, hd = syntactic_reducer(df, align=align)
        phrase_parts.append(context_reducer(phr))
        compound_parts.append(context_reducer(comp))
        modifier_parts.append(context_reducer(mod))
        head_parts.append(context_reducer(hd))
    phrases = _aggregate(pd.concat(phrase_parts, ignore_index=True, sort=False),
                         ['modifier', 'head', 'context', 'year'])
    compounds = _aggregate(pd.concat(compound_parts, ignore_index=True, sort=False),
                           ['modifier', 'head', 'context', 'year'])
    modifiers = _aggregate(pd.concat(modifier_parts, ignore_index=True, sort=False),
                           ['modifier', 'context', 'year'])
    heads = _aggregate(pd.concat(head_parts, ignore_index=True, sort=False),
                       ['head', 'context', 'year'])
    return compounds, modifiers, heads, phrases


def _append_csv(df, path):
    """Write df to `path` as TSV, appending without a header if it exists."""
    if not isfile(path):
        df.to_csv(path, sep="\t", index=False)
    else:
        df.to_csv(path, mode='a', sep="\t", header=False, index=False)


def parallelize_dataframe(df, save_loc, num_cores):
    """Split `df` across a process pool, reduce, and append results to disk.

    The module-level flag `word` selects the pipeline: True -> word contexts
    written to `save_loc`; False -> compound/modifier/head/phrase contexts
    written to the fixed dataset paths below (`save_loc` is ignored there).
    """
    df_split = np.array_split(df, num_cores)
    print("Done splitting the datasets")
    pool = Pool(num_cores)
    cur_time = time.time()
    print("Starting parallelizing")
    if not word:
        results = pool.map_async(cdsm_reducer, df_split)
        pool.close()
        pool.join()
        results = results.get()
        print("Done parallelizing")
        print("Total time taken", round(time.time() - cur_time), "secs")
        # FIX: replaced four copies of the if-exists/append boilerplate with
        # a single _append_csv helper; re-aggregate across pool chunks first.
        compounds = pd.concat([r[0] for r in results], ignore_index=True)
        compounds = compounds.groupby(['modifier', 'head', 'context', 'year'])['count'].sum().to_frame()
        compounds.reset_index(inplace=True)
        _append_csv(compounds, "/data/dharp/compounding/datasets/compounds.csv")

        modifiers = pd.concat([r[1] for r in results], ignore_index=True)
        modifiers = modifiers.groupby(['modifier', 'context', 'year'])['count'].sum().to_frame()
        modifiers.reset_index(inplace=True)
        _append_csv(modifiers, "/data/dharp/compounding/datasets/modifiers.csv")

        heads = pd.concat([r[2] for r in results], ignore_index=True)
        heads = heads.groupby(['head', 'context', 'year'])['count'].sum().to_frame()
        heads.reset_index(inplace=True)
        _append_csv(heads, "/data/dharp/compounding/datasets/heads.csv")

        phrases = pd.concat([r[3] for r in results], ignore_index=True)
        phrases = phrases.groupby(['modifier', 'head', 'context', 'year'])['count'].sum().to_frame()
        phrases.reset_index(inplace=True)
        _append_csv(phrases, "/data/dharp/compounding/datasets/phrases.csv")
    else:
        results = pool.map_async(cdsm_word_reducer, df_split)
        pool.close()
        pool.join()
        print("Done parallelizing")
        print("Total time taken", round(time.time() - cur_time), "secs")
        # FIX: renamed the local from `words_list`, which shadowed the global
        # vocabulary of the same name (a dead `words_list=[]` was removed).
        word_parts = results.get()
        words = pd.concat(word_parts, ignore_index=True, sort=False)
        words = words.groupby(['word', 'context', 'year'])['count'].sum().to_frame()
        words.reset_index(inplace=True)
        _append_csv(words, save_loc)
    print("Done concatenations \n")


num_cores = mp.cpu_count() - 1

store = pd.HDFStore('/data/dharp/compounding/datasets/entire_df_0.h5')
nrows = store.get_storer('df').nrows
chunksize = 100_000
i = 0
word = True  # module-level switch read by parallelize_dataframe
chunk = store.select('df', start=i * chunksize, stop=(i + 1) * chunksize)
parallelize_dataframe(chunk, save_loc='/data/dharp/compounding/datasets/words_test.csv', num_cores=num_cores)

return_df = cdsm_word_reducer(chunk)

# FIX: `tmp_df` was referenced below without ever being defined; load it from
# the store before closing the file handle.
tmp_df = store.select('df', stop=10_000_000)
store.close()
tmp_df.info()

tmp_df_klein = tmp_df.head(10_000_000)
df_split = np.array_split(tmp_df_klein, 10)
for num, df in enumerate(df_split):
    print("Split num:", num + 1)
    # FIX: the original passed a nonexistent `context_type=` keyword and no
    # `save_loc`, which raised TypeError on the first iteration.
    parallelize_dataframe(df, save_loc='/data/dharp/compounding/datasets/words_test.csv', num_cores=num_cores)
print("Done writing to files")
src/Notebooks/GoogleCompounder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# cd ../../

import os  # FIX: os.path.exists is used below but `os` was never imported
import json
import pickle as pkl

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import neptune

# FIX: removed a duplicated `from src.toolbox.utils import _nms` line.
from src.toolbox.utils import _nms
from src.toolbox.eval import evaluate, accumulate_metrics, categorize_results, summarize_results_per_class
from src.toolbox.visualization import plot_performance_per_class, plot_performance_per_duration
from src.toolbox.data_converters import CharadesSTA2Instances

sns.set_style("white")

# load ground truth test set
test_data = CharadesSTA2Instances(pd.read_csv("data/processed/charades/charades_test.csv"))

project = neptune.init("mayu-ot/moment-retrieval")
exp_id = "MOM-6"

# Download the prediction artifact once and cache it under tmp/.
if not os.path.exists(f"tmp/{exp_id}/{exp_id}199.json"):
    exps = project.get_experiments(id=exp_id)
    print("downloading results ...")
    exps[0].download_artifact(f"{exp_id}199.json", f"tmp/{exp_id}")
    print("done!")


# +
def get_duration(video_id):
    """Return the duration (last timestamp) of the test video `video_id`.

    Linear scan over the module-level `test_data`; returns None when the
    id is not found.
    """
    for x in test_data:
        if x[0][0] == video_id:
            return x[1][-1]


def get_evaluation_results(predictions):
    """Convert raw predictions into (query, segments, scores) triples and evaluate.

    Only the top-10 segments/scores per prediction are kept; each segment is
    extended with the source video's duration.
    Returns (per-instance results, accumulated metric summary).
    """
    preds = []
    for p in predictions:
        query = (p[0], p[2] + ".")
        video_duration = get_duration(query[0])
        # NOTE(review): if the video id is unknown, video_duration is None and
        # is appended to every segment — confirm upstream guarantees coverage.
        seg = [s + [video_duration] for s in p[3][:10]]
        preds.append((query, seg, p[5][:10]))
    results = evaluate(test_data, preds)
    summary = accumulate_metrics(results)
    return results, summary
# -


# FIX: close the JSON file handle instead of leaking it via json.load(open(...)).
with open(f"tmp/{exp_id}/{exp_id}199.json") as f:
    predictions = json.load(f)
results, summary = get_evaluation_results(predictions)

# Check relation between success rates and input video durations
plt.rcParams.update({'font.size': 14})
fig = plot_performance_per_duration(results, test_data, ax=plt.gca())

# +
# per-action performance
from src.toolbox.eval import get_first_action, categorize_results, summarize_results_per_class
from src.toolbox.utils import _load_top_actions

top_actions = _load_top_actions("charade")


def cat_fn(x):
    """Map an evaluation instance to its first (most salient) action label."""
    return get_first_action(x, top_actions)


categorized_results = categorize_results(results, cat_fn)
metrics_per_cls = summarize_results_per_class(categorized_results)

class_labels = list(metrics_per_cls.keys())
# Only report classes with enough support (> 10 instances).
frequent_class = [label for label in class_labels if metrics_per_cls[label]["n_instance"] > 10]
_ = plot_performance_per_class({l: v for l, v in metrics_per_cls.items() if l in frequent_class})
notebooks/report/SCDM_CharadeSTA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: python3
#     language: python
#     name: python3
# ---

# + [markdown] originalKey="<KEY>"
# # High-Dimensional Bayesian Optimization with SAASBO
#
#
# This tutorial shows how to use the Sparse Axis-Aligned Subspace Bayesian Optimization (SAASBO) method for high-dimensional Bayesian optimization [1]. SAASBO uses sparse axis-aligned subspace priors to avoid overfitting. Specifically, SAASBO uses a hierarchical sparsity prior consisting of a global shrinkage parameter with a Half-Cauchy prior $\tau \sim \mathcal{HC}(\beta)$ and inverse lengthscales $\rho_d \sim \mathcal{HC}(\tau)$ for $d=1, ..., D$. See [1] for details.
#
# [1] <NAME>, <NAME>. High-Dimensional Bayesian Optimization with Sparse Axis-Aligned Subspaces. Proceedings of the Thirty-Seventh Conference on Uncertainty in Artificial Intelligence, 2021.

# + originalKey="<KEY>"
from ax import ParameterType, RangeParameter, SearchSpace, SimpleExperiment
from ax.benchmark.benchmark import full_benchmark_run
from ax.benchmark.benchmark_result import aggregate_problem_results, BenchmarkResult
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models

# + originalKey="<KEY>"
import torch

tkwargs = {
    "dtype": torch.double,
    "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
}

# + [markdown] originalKey="8fa9ef26-d16f-4d28-9957-877754a76dfe"
# ## Setup methods
#
# This tutorial uses Ax Developer API with the Ax benchmarking suite, but the generation strategy created here can be used in any Ax API, including Service API and `Scheduler`. Additional resources:
# - To learn about different APIs in Ax: https://ax.dev/docs/api.html.
# - To learn more about the developer API, refer to the dedicated tutorial: https://ax.dev/tutorials/gpei_hartmann_developer.html.
# - To learn more about the generation strategy setup: https://ax.dev/tutorials/generation_strategy.html. # + code_folding=[] hidden_ranges=[] originalKey="cad0ae99-e0cf-4a4b-a5eb-ca059a18a4ca" gpei = GenerationStrategy( steps=[ GenerationStep( model=Models.SOBOL, num_trials=10, # How many trials should be produced from this generation step min_trials_observed=5, # How many trials need to be completed to move to next model max_parallelism=None, # Max parallelism for this step; `None` means no limit ), GenerationStep( model=Models.BOTORCH, num_trials=-1, max_parallelism=5, ), ], name="Sobol+GPEI" ) saasbo = GenerationStrategy( steps=[ GenerationStep( model=Models.SOBOL, num_trials=10, min_trials_observed=5, max_parallelism=-1, ), GenerationStep( model=Models.FULLYBAYESIAN, num_trials=-1, max_parallelism=5, model_kwargs={ # Any kwargs you want passed into the model "num_samples": 256, "warmup_steps": 512, "disable_progbar": True, "torch_device": tkwargs["device"], "torch_dtype": tkwargs["dtype"], }, ), ], name="SAASBO" ) # + [markdown] originalKey="<KEY>" # ## Setup search space and metric # In this simple experiment we use the Branin function embedded in a 30-dimensional space. Additional resources: # - To set up a custom metric for your problem, refer to the dedicated section of the Developer API tutorial: https://ax.dev/tutorials/gpei_hartmann_developer.html#8.-Defining-custom-metrics. # - To avoid needing to setup up custom metrics by Ax Service API: https://ax.dev/tutorials/gpei_hartmann_service.html. 
# + originalKey="<KEY>" from ax.core.objective import Objective from ax.core.optimization_config import OptimizationConfig from ax.metrics.branin import BraninMetric from ax.benchmark.benchmark_problem import BenchmarkProblem branin_30 = BenchmarkProblem( name="Branin, D=30", optimal_value=0.397887, optimization_config=OptimizationConfig( objective=Objective( metric=BraninMetric( name="objective", param_names=["x9", "x24"], noise_sd=0.0 ), minimize=True, ) ), search_space=SearchSpace( parameters=[ RangeParameter( name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0 ) for i in range(15) ] + [ RangeParameter( name=f"x{i + 15}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=15.0, ) for i in range(15) ] ), evaluate_suggested=False, ) # + [markdown] originalKey="379571df-a141-48f7-84de-f75bc6e8e760" # ## Run benchmark # + originalKey="bdfeea50-c4e6-4ff1-91ae-c8f6c3160d7c" benchmarking_experiments = full_benchmark_run( problem_groups={"default": [branin_30]}, method_groups={"default": [gpei, saasbo]}, num_replications=1, num_trials=30, batch_size=1, raise_all_exceptions=True, verbose_logging=True ) # - # ## Aggregate results # + originalKey="f102010f-ea3a-4ac4-954f-c13a4dff1d4b" res = aggregate_problem_results(benchmarking_experiments[branin_30.name], problem=branin_30) res_gp_ei = res.true_performance['Sobol+GPEI'].ravel() res_saasbo = res.true_performance["SAASBO"].ravel() # - # ## Plot results # + originalKey="<KEY>" import matplotlib import matplotlib.pyplot as plt import numpy as np # %matplotlib inline matplotlib.rcParams.update({"font.size": 16}) fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(np.minimum.accumulate(res_saasbo), color="b", label="SAASBO") ax.plot(np.minimum.accumulate(res_gp_ei), color="r", label="GP-EI") ax.plot([0, len(res_saasbo)], [res.optimum, res.optimum], "--", c="g", lw=3, label="Optimal value") ax.grid(True) ax.set_title("Branin, D=30", fontsize=20) ax.set_xlabel("Number of evaluations", fontsize=20) 
# Finish styling the axes set up just above and render the figure.
ax.set_xlim([0, len(res_saasbo)])
ax.set_ylabel("Best value found", fontsize=20)
ax.set_ylim([0, 8])  # NOTE(review): hard-coded y-range; widen if values exceed 8
ax.legend(fontsize=18)
plt.show()
tutorials/saasbo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JayDown/3d-converter/blob/master/MediaPipe_Iris_model_converter_for_Barracuda.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ySnBmjCkqDKr" # # MediaPipe Iris model converter for Unity Barracuda # # ## What it does # # - Converts .tflite into ONNX using tflite2onnx # - Replace Pad operators with combinations of ConstantOfShape and Concat. # - Add Expand operators to PRelu slope inputs. # # ## Why it's needed # # - The current implementation of Barracuda doesn't support non-spatial axis padding, so I had to replace them with concatenation with zero-filled tensors. # - The current implementation of the PRelu activator in Barracuda doesn't support undirectional broadcasting, so I had to expand the slope tensors before feeding to the activators. 
# + [markdown] id="rARLDT3h6jID"
# # Setup

# + colab={"base_uri": "https://localhost:8080/"} id="B0NwwIsXa7vQ" outputId="ab3f02ff-5e81-4b92-8991-cf33bfdb4274"
# %pip install tflite2onnx

# + [markdown] id="euRtm0njpxHq"
# # tflite to ONNX conversion

# + colab={"base_uri": "https://localhost:8080/"} id="hDN4knuQoXF8" outputId="3d6fdfab-024a-416e-ce2d-6f617cdb5d0a"
# !wget https://github.com/google/mediapipe/raw/master/mediapipe/models/iris_landmark.tflite

# + id="nin0IiqyoWmd"
# !tflite2onnx iris_landmark.tflite iris_landmark.onnx

# + [markdown] id="u2Bu-Vw6lBK1"
# # Converter implementation

# + id="Z8bu5UbDlA55"
import numpy as np
import onnx
from onnx import checker, helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx import numpy_helper as np_helper

# + id="clcbavcIsmqu"
# Shape tensor generator
def get_shape_tensor(model, shape):
    """Return the name of an INT64 initializer holding `shape`, creating it on demand.

    The initializer is appended to `model.graph.initializer` the first time a
    given shape is requested and reused afterwards, so repeated calls with the
    same shape do not bloat the graph.  Generalized to any rank (the original
    hard-coded rank 4); for 4-element shapes the generated name is identical
    to the old `shape_AxBxCxD` scheme, so existing behavior is unchanged.
    """
    name = 'shape_' + 'x'.join(str(d) for d in shape)
    # If the initializer already exists, simply reuse it.
    if any(x.name == name for x in model.graph.initializer):
        return name
    # Otherwise register a new constant tensor and hand back its name.
    tensor = helper.make_tensor(name, TensorProto.INT64, (len(shape),), shape)
    model.graph.initializer.append(tensor)
    return name

# + [markdown] id="DdUaOQVdlLwI"
# ## Pad operator replacement

# + id="1opFpNS_lGIQ"
def replace_pad_ops(model):
    """Replace every Pad node with ConstantOfShape + Concat along axis 1.

    Barracuda cannot pad non-spatial axes, so the channel padding is emulated
    by concatenating a zero-filled tensor.  Mutates `model` in place.

    NOTE(review): assumes each Pad only extends the *end* of axis 1
    (``pads[5]``) of a 4-D NCHW tensor — true for the Iris landmark model;
    confirm before reusing on other graphs.
    """
    i = 0
    while i < len(model.graph.node):
        # Node type check
        node = model.graph.node[i]
        if node.op_type != 'Pad':
            i += 1
            continue
        # Pad node inputs: the padded tensor and the pads initializer.
        data = next(n for n in model.graph.value_info if n.name == node.input[0])
        pads = next(n for n in model.graph.initializer if n.name == node.input[1])
        # Shape of the zero tensor: same as the input except the channel axis
        # is only as wide as the end-padding of axis 1.  The ONNX pads layout
        # is [begin_0..begin_3, end_0..end_3], so index 5 == end of axis 1.
        dim = tuple(d.dim_value for d in data.type.tensor_type.shape.dim)
        ext = np_helper.to_array(pads)[5]
        shape_tensor = get_shape_tensor(model, (1, ext, dim[2], dim[3]))
        # Replacement nodes: zeros of the right shape, then concat on axis 1.
        const_out = node.name + '_pad'
        const_node = helper.make_node('ConstantOfShape', (shape_tensor,), (const_out,))
        concat_node = helper.make_node('Concat', (data.name, const_out), (node.output[0],), axis=1)
        # Graph modification: splice the pair in where the Pad node was, drop
        # the Pad, then advance past both inserted nodes.
        model.graph.node.insert(i, const_node)
        model.graph.node.insert(i + 1, concat_node)
        model.graph.node.remove(node)
        i += 2

# + [markdown] id="W25ZKEkhlbbC"
# ## PRelu operator replacement

# + id="9U085LqKmhmb"
def replace_prelu_ops(model):
    """Expand each PRelu slope tensor to the full input shape.

    Barracuda's PRelu does not broadcast the slope, so an explicit Expand node
    is inserted in front of a fresh PRelu node.  Mutates `model` in place.
    """
    i = 0
    while i < len(model.graph.node):
        # Node type check
        node = model.graph.node[i]
        if node.op_type != 'PRelu':
            i += 1
            continue
        # PRelu node input
        input = next(n for n in model.graph.value_info if n.name == node.input[0])
        # Shape tensor matching the PRelu input, used as the Expand target.
        dim = tuple(d.dim_value for d in input.type.tensor_type.shape.dim)
        shape_tensor = get_shape_tensor(model, dim)
        # Replacement nodes: Expand the slope, then a PRelu fed by it.
        expand_out = node.name + '_expand'
        expand_node = helper.make_node('Expand', (node.input[1], shape_tensor), (expand_out,))
        prelu_node = helper.make_node('PRelu', (input.name, expand_out), (node.output[0],))
        # Graph modification: insert the pair, remove the old node, and skip
        # past the inserted nodes so the new PRelu is not revisited.
        model.graph.node.insert(i, expand_node)
        model.graph.node.insert(i + 1, prelu_node)
        model.graph.node.remove(node)
        i += 2

# + [markdown] id="zyLXZZ8gp42D"
# # ONNX to ONNX (Barracuda) conversion

# +
# + id="_qXwtjWA7VDw"
# Apply both graph rewrites to the tflite2onnx output, validate the rewritten
# graph, and write the Barracuda-compatible model next to the original.
model = onnx.load("iris_landmark.onnx")
replace_pad_ops(model)
replace_prelu_ops(model)
checker.check_model(model)  # raises if the rewritten graph is malformed
onnx.save(model, "iris_landmark_barracuda.onnx")
MediaPipe_Iris_model_converter_for_Barracuda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # (Recolectar Datos), Explorar y Visualizar # * En este ejemplo, la etapa de recolección ya ha sido efectuada; vamos a trabajar con el dataset iris, incluido como parte de `sklearn` # * En la mayor parte de los casos, los datos tienen que ser primero pre-procesados y analizados por medio de procesos de exploración. `sklearn` incluye muchas herramientas para hacer esto, junto con otros paquetes como `pandas`. # # <img src='imgs/fancy_process.png' alt="Smiley face" width="700"><br> # # ## El set de datos - Fisher's Irises # La mayor parte de los algoritmos de aprendizaje de maquina implementados en scikit-learn esperan que los datos se encuentren en una **matriz o arreglo bi-dimensional**. Estos arreglos pueden ser almacenados en estructuras provistas por medio de ``numpy``, ``scipy``, ``pandas``, etc. # # El tamaño del arreglo es dado como `n_muestras x n_atributos`. # # - **n_muestras:** El número de muestras u observaciones: Cada una es una entrada en el proceso de clasificación. Puede ser un documento, una imagen, un video, un objeto astronómico, una fila en una base de datos o archivo de texto, etc. siempre y cuando pueda describir un conjunto de atributos cuantitativos. # # - **n_atributos:** El número de atributos o rasgos distintos que pueden ser usados para describir cada una de las muestras de manera cuantitativa. Estos atributos son generalmente valores reales, pero pueden ser representados de alguna manera booleana o discreta.<br> # # El número de atributos es fijo, y su extensión se describe por medio de la dimensionalidad o cardinalidad. Dependiendo de los datos representados, pueden almacenarse en estructuras especializadas como `scipy.sparse` para hacer un mejor manejo de los datos. 
# <br> # # Si existen etiquetas o clases objetivo, deben ser almacenadas en un arreglo de una dimensión. # Para este ejemplo, se utilizará el dataset **iris**. incluido con `sklearn`. Es un dataset sencillo y facil de describir. # > <b>Recordar el proceso de ML: Formular preguntas.</b><br><br>Ejemplo: ¿Qué tipo de flor es la más cercana, dado las 3 clases con las que cuenta el dataset? # # <img border="0" alt="iris species" src="imgs/iris-setosa.jpg" width="200"> # <p align="right">de http://www.madlantern.com/photography/wild-iris</p> # # ### Etiquetas (nombres de especies/clases): # <img border="0" alt="iris species" src="imgs/irises.png" width="500" height="500"> # TIP: Comunmente, los algoritmos de machine learning van a requerir que los datos se encuentren estandarizados, normalizados o regularizados. Esto permite que los datos se encuentren en la estructura correcta. # + # Imports for python 2/3 compatibility from __future__ import absolute_import, division, print_function, unicode_literals # For python 2, comment these out: # from builtins import range # - # Pregunta rápida: # 1. Qué podemos esperar de un data set a partir del cual queremos reconocer una especie de iris? # * Para la estructura `[n_muestras x n_atributos]` qué datos podemos esperar para: # * las muestras? # * los atributos? # + from sklearn import datasets iris = datasets.load_iris() print(type(iris.data)) print(type(iris.target)) # - # ## Empecemos! 
# %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt # #### Atributos (columnas en los datos) # + import pandas as pd from sklearn import datasets iris = datasets.load_iris() # Convertir a dataframe para visualizacion sencilla pd.DataFrame({'feature name': iris.feature_names}) # - # #### Etiquetas (aka labels) # + import pandas as pd from sklearn import datasets iris = datasets.load_iris() # Conversión de dataframe para visualización sencilla pd.DataFrame({'target name': iris.target_names}) # - # > `sklearn` TIP: todos los datasets incluidos por defecto en la librería tienen por lo menos un atributo llamado `feature_names` y a veces `target_names`. # ### Conocer los datos - exploración # * Atributos (columnas/medidas) vienen de este diagrama # <img border="0" alt="iris data features" src="imgs/iris_petal_sepal.png" width="200" height="200"> # * Estructural # * Conocer los datos # * Resúmenes # <b>Estructura y representación<b> # + import pandas as pd from sklearn import datasets iris = datasets.load_iris() # How many data points (rows) x how many features (columns) print(iris.data.shape) print(iris.target.shape) # - # <b>Revisión rápida de los datos (y repaso de métodos explorativos en `pandas`)<b> # + # Conversión a df (Agregar nombres de columnas) iris.df = pd.DataFrame(iris.data, columns = iris.feature_names) # Primeras observaciones iris.df.head() # - # <b>Describir el dataset de manera básica estadísticamente<b> # summary stats iris.df.describe() # * No hay mucho qué hacer con el dataset de `iris`. No tiene valores faltantes, ya se encuentra en una estructura correcta para `sklearn`. Sin embargo, puede intentarse generar columnas estandarizadas o normalizadas. 
# ## Visualización # * Existen muchas maneras de visualizar datos: # + import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn import datasets iris = datasets.load_iris() iris.df = pd.DataFrame(iris.data, columns = iris.feature_names) iris.df['target'] = iris.target df = iris.df.loc[:, ['sepal length (cm)', 'target']] df['idx'] = list(range(0, 50))*3 df = df.pivot(index = 'idx', columns = 'target') df = np.array(df) plt.boxplot(df, labels = iris.target_names) plt.title('sepal length (cm)') # - # **Utilizando `pairplot` de `seaborn` es una manera rápida de ver los atributos que separan los datos.** import seaborn as sb sb.pairplot(pd.DataFrame(iris.data, columns = iris.feature_names)) # ### Preprocesamiento (Material Adicional) # <p>Algunas cosas a considerar antes de entrenar un estimador en `sklearn`:</p> # 1. Transformacions no-numéricas a numéricas (tip: usar applymap() con `pandas`) # * Reemplzar valores vacios # * Estandarización # * Normalización # * Factorizar atributos categóricos (e.g. one-hot encoding o dummy variables) # #### Estandarizacion - hacer que nuestro datos se parezcan a una distribución Gausiana estándar (comunmente usado para estimadores `sklearn` ) # > FYI: En problemas y ejemplos de sklearn, es común ver las columnas de atributos representadas como una **X** mayúscula, y las etiquetas objetivo representadas por una **y** minúscula. Esto es porque los datos se encuentran estructurados como un arreglo en 2D y la variable objetivo representada por un arreglo de 1D o lista de valores. 
# + # Estandarización/Escala from sklearn import preprocessing, datasets # Cargar el set de datos iris = datasets.load_iris() X, y = iris.data, iris.target # escalar a una distribución gausiana X_scaled = preprocessing.scale(X) # como se ve ahora pd.DataFrame(X_scaled).head() # + # let's just confirm our standardization worked (mean is 0 w/ unit variance) pd.DataFrame(X_scaled).describe() # also could: #print(X_scaled.mean(axis = 0)) #print(X_scaled.std(axis = 0)) # - # > PRO TIP: Para guardar esta estandarización y volverla a aplicar posteriormente, puede crearse un object transformer de la siguiente manera: # # ```python # scaler = preprocessing.StandardScaler().fit(X_train) # # aplicar a nuevos datos (e.g. set de pruebas): # scaler.transform(X_test) # ``` # #### Normalización - escalación <i>individual</i> de atributos para tener forma unitaria. # * Este tipo de escalación es importante para hacer ciertas transformaciones donde la similitud de pares y muestras es examinada. (ver docs de sklearn [acá](http://scikit-learn.org/stable/modules/preprocessing.html#normalization) para más) # # * Se puede ver un tutorial acerca de este tema [acá](http://freetext.org/Introduction_to_Linear_Algebra/Basic_Vector_Operations/Normalization/) # + # Estandarización aka escalación from sklearn import preprocessing, datasets # Cargar los datos iris = datasets.load_iris() X, y = iris.data, iris.target # Escalar usando una distribución gausiana X_norm = preprocessing.normalize(X, norm='l1') # Cómo se ve ahora? 
pd.DataFrame(X_norm).tail() # + # Confirmar que la estandarización fue exitosa pd.DataFrame(X_norm).describe() # cumulative sum of normalized and original data: #print(pd.DataFrame(X_norm.cumsum().reshape(X.shape)).tail()) #print(pd.DataFrame(X).cumsum().tail()) # Verificación de unidad (todos los atributos deben sumar 1 ahora) X_norm.sum(axis = 1) # - # > PRO TIP: Para guardar esta estandarización y volverla a aplicar posteriormente, puede crearse un object transformer de la siguiente manera: # # ```python # normalizer = preprocessing.Normalizer().fit(X_train) # # Aplicar a un nuevo dataset (e.g. set de pruebas): # normalizer.transform(X_test) # ``` # Created by a Microsoft Employee # Traducido por <NAME> [@fireblend](http://github.com/fireblend) # # The MIT License (MIT)<br> # Copyright (c) 2016 <NAME>
02. Nuestro Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.4 64-bit
#     name: python3
# ---

# NOTE: strings are immutable — slicing always builds a new string.
# str_object[start_pos:end_pos:step]
# Python string slicing always follows this rule: s[:i] + s[i:] == s for any index 'i'.

# problem 1: basic string slicing
s = 'Beautiful palace'
print(s[:], '\n')     # a full slice copies the whole string
print(s[::], '\n')    # same, with an explicit (empty) step
first_five_chars = s[:5]
print("First Five characters :", first_five_chars, "\n")
third_to_fifth_chars = s[2:5]
# fixed: the extra parentheses made print() output a tuple instead of the text
print("Third_to_fifth_chars:", third_to_fifth_chars, "\n")

# problem 2: Slicing of strings using the slice() builtin
py_string = 'learn python'
# start = -1, stop = -6, step = -1
# includes indices -1, -2, -3, -4 and -5 (comment fixed: stop is -6, not -4)
slice_object1 = slice(-1, -6, -1)
slice_object2 = slice(1, 6, -1)   # empty: a negative step cannot go from 1 up to 6
slice_object3 = slice(1, 6, 1)    # indices 1 .. 5
print(py_string[slice_object1], py_string[slice_object2], py_string[slice_object3])

# problem 3: Slicing of strings
# Reverse a string using slicing
s = 'HelloWorld'
reverse_str = s[::-1]
print(reverse_str)
s1 = s[2:8:2]
print(s1)
s1 = s[8:1:-1]
print(s1)
# Python slice works with positive indexes
# (removed a dead `s1 = s[-4:-2]` that was immediately overwritten here)
s1 = s[8:1:-2]
print("Python slice works with positive indexes ", s1)
# Python slice works with negative indexes
s1 = s[-4:-2]
print("Python slice works with negative indexes ", s1)
# out-of-range bounds are clipped instead of raising
s = 'Python'
s1 = s[100:]   # ''
s1 = s[2:50]   # 'thon'

# problem 4:
# Program to get a substring from the given string
py_string = 'Python book'
# contains 0, 1 and 2 indices
slice_object = slice(3)
print("substring is : ", py_string[slice_object])  # Pyt
# start = 1, stop = 6, step = 2
# contains 1, 3 and 5 indices
slice_object = slice(1, 6, 2)
print("substring :", py_string[slice_object])  # yin

# problem 5: slice objects work on lists and tuples too
py_list = ['P', 'y', 't', 'h', 'o', 'n']
py_tuple = ('P', 'y', 't', 'h', 'o', 'n')
# indices 0, 1 and 2
slice_object = slice(3)
print(py_list[slice_object])  # ['P', 'y', 't']
# indices 1 and 3
slice_object = slice(1, 5, 2)
print(py_tuple[slice_object])
# ('y', 'h')
# A negative-step slice object picks indices -1, -2 and -3 from the list.
slice_object = slice(-1, -4, -1)
print(py_list[slice_object])
# ['n', 'o', 'h']
# Step -2 over negative positions: indices -1 and -3.
slice_object = slice(-1, -5, -2)
print(py_tuple[slice_object])
# ('n', 'h')
py_string = 'Python'
# Plain slice syntax mirrors slice objects — indices 0, 1 and 2 ...
print(py_string[:3])  # Pyt
# ... and indices 1 and 3.
print(py_string[slice(1, 5, 2)])
OOP/Practice Sessions/StringsNotebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![Notebook Tutorial](__code/__all/notebook_tutorial.png)](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/extract_evenly_spaced_files) # # <img src='__docs/__all/notebook_rules.png' /> # + [markdown] run_control={"frozen": false, "read_only": false} # # Select Your IPTS # + run_control={"frozen": false, "read_only": false} from __code.extract_evenly_spaced_files import ExtractEvenlySpacedFiles as EESF from __code import system system.System.select_working_dir() from __code.__all import custom_style custom_style.style() # + [markdown] run_control={"frozen": false, "read_only": false} # # Select Folder with Images to Extract # + run_control={"frozen": false, "read_only": false} o_extract = EESF(working_dir=system.System.get_working_dir()) o_extract.select_folder() # + [markdown] run_control={"frozen": false, "read_only": false} # # Extraction Method to Use # + run_control={"frozen": false, "read_only": false} o_extract.how_to_extract() # - # # Renamed files ? # This will replace the last part of the name (file counter digit) # # for example: # # original first file: 20191030_object1_0070_004_594_0003.tiff # new first file name: 20191030_object1_0070_004_594_0000.tiff o_extract.renamed_files() # + [markdown] run_control={"frozen": false, "read_only": false} # # Select Output Folder # + run_control={"frozen": false, "read_only": false} o_extract.select_output_folder() # -
notebooks/extract_evenly_spaced_files.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

# freeCodeCamp "Medical Data Visualizer" scratchpad.
df = pd.read_csv('medical_examination.csv')
# -

df

# note on the data
# age is in DAYS
# height is in CM
# weight is in KG

# add a column based on BMI (weight / height_m**2): 1 if BMI > 25, else 0.
# Vectorized instead of a row-wise apply — same values, much faster.
df['overweight'] = (df['weight'] / (df['height'] / 100) ** 2 > 25).astype(int)
df


# +
# normalizing data: ratings of 1 (good) become 0, anything above 1 (bad) becomes 1
def normalize_values(value, **kwargs):
    """Map a rating to a binary flag: <= 1 becomes 0, otherwise 1."""
    return 0 if value <= 1 else 1


# Columns 7 and 8 are presumably 'cholesterol' and 'gluc' — confirm against the
# CSV header.  Series.map replaces the old row-wise apply, which relied on the
# deprecated positional lookup `x[i]` on a label-indexed row Series.
for i in range(7, 9):
    df.iloc[:, i] = df.iloc[:, i].map(normalize_values)
df
# -

# convert df from wide to long using pd.melt
# only use values for 'cholesterol', 'gluc', 'smoke', 'alco', 'active' and 'overweight'
# (removed a dead first melt whose result was immediately overwritten)
df_cat = pd.melt(df, id_vars='cardio', value_vars=['active', 'alco', 'cholesterol', 'gluc', 'overweight', 'smoke'])
# df_cat = df_cat.groupby(by=['variable', 'value']).count()
df_cat

# courtesy of ArbyC on freeCodeCamp forums
fig = sns.catplot(
    data=df_cat,
    kind='count',
    x='variable',
    hue='value',
    col='cardio'
)
fig.set_ylabels('total')

# NOTE(review): FacetGrid.axes is an array of Axes — this line looks like
# leftover exploration; verify it runs before relying on it.
fig.axes.get_children()

df.shape

# +
# clean data for the heatmap:
#   - keep rows where diastolic pressure <= systolic (ap_lo <= ap_hi)
#   - drop height/weight outside the [2.5%, 97.5%] percentile band
# Percentiles are still computed on the full df (as before); the filters are
# combined into one boolean mask so every condition indexes frames of the
# same length instead of relying on index alignment.
keep = (
    (df['ap_lo'] <= df['ap_hi'])
    & (df['height'] >= df['height'].quantile(0.025))
    & (df['height'] <= df['height'].quantile(0.975))
    & (df['weight'] >= df['weight'].quantile(0.025))
    & (df['weight'] <= df['weight'].quantile(0.975))
)
df_heat = df[keep]
df_heat

# +
# Correlation heatmap with the (redundant) upper triangle masked out.
corr = df_heat.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
fig, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(corr, mask=mask, vmax=.3, center=0, square=True, linewidths=.5,
            cbar_kws={'shrink': .5}, annot=True, fmt=".1f")
sketches.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="vw6G3xPpa9Me" colab_type="text" # # Simple Python I/O # The most common I/O functions in Python are `input()` and `print()`. These are not the only I/O functions in Python. The Python `input()` function will print a prompt and then wait for text from the user. The function then returns the text. The Python `print()` function will output text to the terminal. It is similar to the `printf()` function in C or `console.log()` in JavaScript. # + [markdown] id="QoWlhHWwbImm" colab_type="text" # ## Terminal Output # In Python, almost everything is printable by default, without conversion. Strings, Numbers, Sequences, Booleans, Dictionaries, Sets and many other classes of objects can be printed with no special syntax. Print does not have a return value, we simply call the function. A Class instance can be printed properly iff the Class has a `__str__` and/or `__repr__` method. More on Classes will be found in the Classes notebook. Expressions can be printed directly, iff it resolves to a printable value. Expression Example: `print(2 + 4 * 8)` # # The following code prints `Hello, Python!` to the terminal. # + id="PVu_jDeqbMca" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dfd26cab-655a-4154-ac86-6a36ca88d423" print("Hello, Python!") # + [markdown] id="3RYUiLfu0TIF" colab_type="text" # #### Separator Examples: # # The print function can take an unbounded number of arguments. We can separate these items with the `sep` keyword argument. The `sep` value must be a string. See the examples below... 
# + id="-TQDTYCP0cJj" colab_type="code" outputId="214ef06e-4ee7-4ed9-f6fa-30206687f9e4" colab={"base_uri": "https://localhost:8080/", "height": 119} print("Alpha", "Beta", "Gamma") # By default sep=' ' print("Alpha", "Beta", "Gamma", sep=", ") # Comma and space print("Alpha", "Beta", "Gamma", sep="\t") # Tab print("Alpha", "Beta", "Gamma", sep="\n") # New line # + [markdown] id="OZcbv-8ab0wd" colab_type="text" # ## Terminal Input # Input from `input()` is always read # as text (even numbers!), more specifically it's a string, aka `str` type. # The text from the user becomes the output (return value) of the input function. # The return value of any function can be assigned to a variable. # # The follwing code prints `What's your name? ` and waits for input from the user. Then it stores the user input into the `name` variable as a string. # + id="Y9BfybTyayV7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="da5aadda-7d1e-4995-8c6f-3acc81da2e67" name = input("What's your name? ") # + [markdown] id="QgRdGcZNeZZ0" colab_type="text" # The next line of code will print the greeting. # + id="tsBBzU8VcTsc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="11411f3f-99b5-47c1-bdce-56b9074<PASSWORD>" print("Hello,", name) # + [markdown] id="sx7NUIWNhXVS" colab_type="text" # ## Run Your Python Script # # First, navigate to the directory holding your script, then type the following: # # ``` # $ python3 A01_hello_world.py # ``` # # The `$` above represents the shell prompt, yours may look different. You should only copy the code that follows it. In other words: you never need to type the `$` prompt. If the terminal prompt instead looks like this... # ``` # >>> # ``` # Then you are currently in the Python REPL. First exit the REPL with the `exit()` function, then run your script. 
# # ### Your first script should resemble the following: # # ```python # # File Name: A01_hello_world.py # """ # Author: <Your Name> # Team: <Your Team> # Date: <Today> # """ # # name = input("What's your name? ") # print("Hello,", name) # ``` # # The first few lines of this script are comments. The first line is a single-line comment - marked by the `#` symbol. It is followed by a multi-line comment - indicated by the triple quotes before and after the text. Comments will be correctly ignored by Python. More info on comments to come later. For now, it is good enough to know that code comments are specifically for the human readers of your code, including future you. # # + id="PwIA6MYVuuNs" colab_type="code" colab={}
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Intro_to_Python/01_hello_world.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- { "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# NLP Lecture @ Strive School - 21st July 2021\n", "# CHATBOT with Pytorch\n", "\n", "'''\n", "Chatbots are essential for speeding up user assistance and reducing waiting times. Chatbots can quickly extract important information such as demographics, symptoms, health insurance information and assist any patients by making appointments with specialists.\n", "\n", "Imagine having to design a tool that allows preliminary assistance for those who must access a treatment path or must make a reservation for a specialist visit.\n", "\n", "Create a dataset using the template provided as a base and prepare at least 5 different intents with 4/5 responses each.\n", "\n", "The final result must ensure that users can have a dialogue of at least 3 questions and 3 answers consistent with the context.\n", "\n", "Example\n", "A: Hello MedAssistant.\n", "B: Hello. How can I help you?\n", "A: I don't feel well.\n", "B: Do you have any symptoms?\n", "A: I have cough and nausea.\n", "B: Do you want to book an appointment?\n", "A: Yes, please, for tomorrow.\n", "\n", "\n", "Info:\n", "- Feel free to change or arrange a new dataset of intents\n", "- Try experimenting and tuning with the hyperparameters\n", "- Feel free to use or change the code you've seen during the morning session\n", "- TBD = To be done (from you!) 
:)\n", "'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import json\n", "\n", "import torch\n", "import torch.nn as nn\n", "from torch.utils.data import Dataset, DataLoader\n", "\n", "from nltk_utils import bag_of_words, tokenize, stem\n", "from model import NeuralNet\n", "\n", "# STEP 0: find intents patterns\n", "\n", "with open('intents.json', 'r') as f:\n", " intents = json.load(f)\n", "\n", "all_words = []\n", "tags = []\n", "patterns = []\n", "\n", "for intent in intents['intents']:\n", "# TBD: loop through each sentence in our intents patterns, create a list of tags and define the patterns\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STEP 1: Pre-process of the input\n", "\n", "# lower case? stemming? stopwords?\n", "# TBD" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STEP 2: Define training data through a bag of words\n", "\n", "X_train = []\n", "y_train = []\n", "for (pattern_sentence, tag) in patterns:\n", " bag = bag_of_words(pattern_sentence, all_words)\n", " X_train.append(bag)\n", " label = tags.index(tag)\n", " y_train.append(label)\n", "\n", "X_train = np.array(X_train)\n", "y_train = np.array(y_train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STEP 3: Configure the neural network\n", "\n", "# define each parameter that is equal to 0 using an empirical value or a value based on your experience\n", "# TBD\n", "\n", "num_epochs = 0\n", "batch_size = 0\n", "learning_rate = 0.001\n", "input_size = len(X_train[0])\n", "hidden_size = 0\n", "output_size = len(tags)\n", "\n", "# STEP 4: Train the model\n", "\n", "class ChatDataset(Dataset):\n", "\n", " def __init__(self):\n", " self.n_samples = len(X_train)\n", " self.x_data = X_train\n", " self.y_data = y_train\n", "\n", " # support indexing such that 
dataset[i] can be used to get i-th sample\n", " def __getitem__(self, index):\n", " return self.x_data[index], self.y_data[index]\n", "\n", " # we can call len(dataset) to return the size\n", " def __len__(self):\n", " return self.n_samples\n", "\n", "dataset = ChatDataset()\n", "train_loader = DataLoader(dataset=dataset,\n", " batch_size=batch_size,\n", " shuffle=True,\n", " num_workers=0)\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "model = NeuralNet(input_size, hidden_size, output_size).to(device)\n", "\n", "# Define loss and optimizer: which one is the best one?\n", "# TBD\n", "criterion = None\n", "optimizer = None\n", "\n", "# Train the model\n", "for epoch in range(num_epochs):\n", " for (words, labels) in train_loader:\n", " words = words.to(device)\n", " labels = labels.to(dtype=torch.long).to(device)\n", "\n", " # Forward pass\n", " outputs = model(words)\n", " # if y would be one-hot, we must apply\n", " # labels = torch.max(labels, 1)[1]\n", " loss = criterion(outputs, labels)\n", "\n", " # Backward and optimize\n", " optimizer.zero_grad()\n", " loss.backward()\n", " optimizer.step()\n", "\n", "\n", "data = {\n", " \"model_state\": model.state_dict(),\n", " \"input_size\": input_size,\n", " \"hidden_size\": hidden_size,\n", " \"output_size\": output_size,\n", " \"all_words\": all_words,\n", " \"tags\": tags\n", "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STEP 5: Save the model\n", "\n", "# TBD: name and save the model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STEP 6: Test the model\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "# TBD: Load the intents file\n", "\n", "# TBD: Retrieve the model and all the sizings\n", "\n", "# TBD: build the NN\n", "\n", "# TBD: prepare a command-line conversation (don't forget something to make the user exit the 
script!)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.13" } }, "nbformat": 4, "nbformat_minor": 2 }
assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import csv import json import time import ciso8601 import pandas as pd from datetime import datetime """ Total Machine Numbers: 552 Total GPU Numbers: 2490 × 2 GPU Machine(12GB) Numbers: 321 × 8 GPU Machine(24GB) Numbers: 231 × ************* Revised ***************** Total Machine Numbers: 552 - 13 Total GPU Numbers: 3205 8 GPU Machine Numbers: 264 4 GPU Machine Numbers: 271 3 GPU Machine Numbers: 1 2 GPU Machine Numbers: 3 0 GPU Machine Numbers: 13 Number of total jobs: 117325 Number of no recorded attempt jobs: 5236 Number of no recorded start jobs: 136 Number of no recorded end jobs: 6695 15 VC with "ip" nums [409, 412, 409, 388, 414, 226, 399, 386, 389, 389, 301, 85, 10, 5, 12] """ """Constants""" LOGDIR = './data' DATE_FORMAT_STR = '%Y-%m-%d %H:%M:%S' TOTAL_GPU_NUM = 3205 TOTAL_Machine_NUM = 552 - 13 LOG_START = '2017-08-14 23:27:00' # '2017-08-07 10:03:01' LOG_END = '2017-12-22 08:03:22' START_TS = ciso8601.parse_datetime(LOG_START).timestamp() END_TS = ciso8601.parse_datetime(LOG_END).timestamp() NUM_Time_Interval = int((END_TS - START_TS)/60)+1 HEADERS = ['Timestamp', 'Total_GPU_Num', 'Idle_GPU_Num', 'GPUJob_Num', 'Running_GPUJob_Num', 'Pending_GPUJob_Num', 'Running_GPU_Num', 'Pending_GPU_Num', 'Pending_Morethan_8GPUJob_Num', 'Pending_8GPUJob_Num', 'Pending_4GPUJob_Num', 'Pending_2GPUJob_Num', 'Pending_1GPUJob_Num', 'Pending_Lessthan_8GPUJob_Num', 'Idle_Machine_8GPU', 'Idle_Machine_5_7GPU', 'Idle_Machine_4GPU', 'Idle_Machine_3GPU', 'Idle_Machine_2GPU', 'Idle_Machine_1GPU'] """Data Load and Preprocessing""" cluster_job_log_path = os.path.join(LOGDIR, 'cluster_job_log') cluster_machine_list_path = os.path.join(LOGDIR, 'revised_machine_list.csv') with open(cluster_job_log_path, 'r') as f: data = json.load(f) machine_df = 
pd.read_csv(cluster_machine_list_path) machine_df['idle_gpu_num'] = machine_df['number of GPUs'] machine_df['deploy_times'] = 0 machine_df.index = machine_df['machineId'] machine_df = machine_df.drop(columns=['Unnamed: 0', 'machineId']) # Original machine list preprocessing # cluster_machine_list_path = os.path.join(LOGDIR, 'cluster_machine_list') # machine_df['number of GPUs'] = machine_df[' number of GPUs'] # machine_df['idle_gpu_num'] = machine_df[' number of GPUs'] # machine_df['deploy_times'] = 0 # machine_df.index = machine_df['machineId'] # machine_df = machine_df.drop(columns=[' number of GPUs', ' single GPU mem', 'machineId']) # + cluster_machine_list_path = os.path.join(LOGDIR, 'revised_machine_list.csv') #'cluster_machine_list' with open(cluster_job_log_path, 'r') as f: data = json.load(f) machine_df = pd.read_csv(cluster_machine_list_path) machine_df['idle_gpu_num'] = machine_df['number of GPUs'] machine_df['deploy_times'] = 0 machine_df.index = machine_df['machineId'] machine_df = machine_df.drop(columns=['Unnamed: 0', 'machineId']) # - class ClusterItem: """Time Sequence Items""" def __init__(self, key): self._timestamp = str(datetime.fromtimestamp(START_TS + key * 60)) self._total_gpu_number = TOTAL_GPU_NUM self._gpujob_number = 0 self._running_gpujob_number = 0 self._pending_gpujob_number = 0 self._running_gpu_number = 0 self._pending_gpu_number = 0 self._idle_gpu_number = 0 self._pending_8gpujob_number = 0 self._pending_4gpujob_number = 0 self._pending_2gpujob_number = 0 self._pending_1gpujob_number = 0 self._pending_more8_gpujob_number = 0 # Request more than 8 GPU jobs self._pending_less8_gpujob_number = 0 # Refer to 3, 5, 6, 7 GPU jobs self._idle_machine_8GPU = 0 self._idle_machine_5_7GPU = 0 self._idle_machine_4GPU = 0 self._idle_machine_3GPU = 0 self._idle_machine_2GPU = 0 self._idle_machine_1GPU = 0 self._running_machines = [] self._running_inside_machines = [] def tuple(self): return ( self._timestamp, self._total_gpu_number, 
self._total_gpu_number - self._running_gpu_number, self._gpujob_number, self._running_gpujob_number, self._pending_gpujob_number, self._running_gpu_number, self._pending_gpu_number, self._pending_more8_gpujob_number, self._pending_8gpujob_number, self._pending_4gpujob_number, self._pending_2gpujob_number, self._pending_1gpujob_number, self._pending_less8_gpujob_number, self._idle_machine_8GPU, self._idle_machine_5_7GPU, self._idle_machine_4GPU, self._idle_machine_3GPU, self._idle_machine_2GPU, self._idle_machine_1GPU, ) # + """Parse Data into Job List""" num_submit_jobs = len(data) num_no_attempt_jobs = 0 num_no_start_jobs = 0 num_no_end_jobs = 0 joblist, vc = [], {} for job in data: if len(job['attempts']) == 0: num_no_attempt_jobs += 1 continue for amp in range(len(job['attempts'])): j = [] if job['attempts'][amp]['start_time'] == 'None': num_no_start_jobs += 1 continue if job['attempts'][amp]['end_time'] == 'None': # !!!!!!!!!!!!!!!! num_no_end_jobs += 1 continue if amp > 0 and job['attempts'][amp-1]['end_time'] == 'None': continue j.append(job['jobid']+'-attempt'+str(amp)) j.append(job['vc']) if not job['vc'] in vc: vc[job['vc']] = set() j.append(job['user']) j.append(job['status']) # !!!!!!!!!!!!!!! 
if amp == 0: j.append(job['submitted_time']) j.append(job['attempts'][amp]['start_time']) j.append(job['attempts'][amp]['end_time']) else: j.append(job['attempts'][amp-1]['end_time']) j.append(job['attempts'][amp]['start_time']) j.append(job['attempts'][amp]['end_time']) node_list, gpu_num = [], [] for g in job['attempts'][amp]['detail']: machine_df.at[g['ip'], 'deploy_times'] += 1 gpu_num.append(len(g['gpus'])) node_list.append(g['ip']) vc[job['vc']].add(g['ip']) j.append(sum(gpu_num)) j.append(gpu_num) j.append(node_list) joblist.append(j) # print('Number of no recorded attempt jobs: ', num_no_attempt_jobs) # print('Number of no recorded start jobs: ', num_no_start_jobs) # print('Number of no recorded end jobs: ', num_no_end_jobs) # + """Create Time Sequence List""" cluster_data = [] for i in range(NUM_Time_Interval): cluster_data.append(ClusterItem(i)) ss = time.perf_counter() """From Job Log to Time Sequence""" for job in joblist: submit = int((ciso8601.parse_datetime(job[4]).timestamp() - START_TS)/60) start = int((ciso8601.parse_datetime(job[5]).timestamp() - START_TS)/60) end = int((ciso8601.parse_datetime(job[6]).timestamp() - START_TS)/60) for t in range(submit, start+1): cluster_data[t]._gpujob_number += 1 cluster_data[t]._pending_gpujob_number += 1 cluster_data[t]._pending_gpu_number += int(job[7]) if int(job[7]) == 1: cluster_data[t]._pending_1gpujob_number += 1 elif int(job[7]) == 2: cluster_data[t]._pending_2gpujob_number += 1 elif int(job[7]) == 4: cluster_data[t]._pending_4gpujob_number += 1 elif int(job[7]) == 8: cluster_data[t]._pending_8gpujob_number += 1 elif int(job[7]) > 8: cluster_data[t]._pending_more8_gpujob_number += 1 else: cluster_data[t]._pending_less8_gpujob_number += 1 for t in range(start, end+1): cluster_data[t]._gpujob_number += 1 cluster_data[t]._running_gpujob_number += 1 cluster_data[t]._running_gpu_number += int(job[7]) cluster_data[t]._running_machines cluster_data[t]._running_machines.append(job[-1]) 
cluster_data[t]._running_inside_machines.append(job[-2]) ee = time.perf_counter() print('Time Usage:', round(ee-ss, 2)) # + ss = time.perf_counter() for t in range(NUM_Time_Interval):#NUM_Time_Interval assert len(cluster_data[t]._running_machines) == len(cluster_data[t]._running_inside_machines) machines = machine_df.copy() for k in range(len(cluster_data[t]._running_machines)): for i in range(len(cluster_data[t]._running_machines[k])): machines.at[cluster_data[t]._running_machines[k][i], 'idle_gpu_num'] -= cluster_data[t]._running_inside_machines[k][i] machine_state = machines['idle_gpu_num'].value_counts() for i in range(9): if i not in machine_state.index: machine_state[i] = 0 cluster_data[t]._idle_machine_8GPU = machine_state[8] cluster_data[t]._idle_machine_4GPU = machine_state[4] cluster_data[t]._idle_machine_2GPU = machine_state[2] cluster_data[t]._idle_machine_1GPU = machine_state[1] cluster_data[t]._idle_machine_3GPU = machine_state[3] cluster_data[t]._idle_machine_5_7GPU = machine_state[5] + machine_state[6] + machine_state[7] ee = time.perf_counter() print('Time Usage:', round(ee-ss, 2)) with open('./timeseq.csv', 'w') as cluster: writer = csv.writer(cluster) writer.writerow(HEADERS) for i in cluster_data: writer.writerow(i.tuple()) # + """Job List Sample""" TEST_INDEX = 67863 capacity = [] for k in range(len(cluster_data[TEST_INDEX]._running_machines)): ls = [] for i in range(len(cluster_data[TEST_INDEX]._running_machines[k])): ls.append(machine_df.at[cluster_data[TEST_INDEX]._running_machines[k][i], 'number of GPUs']) capacity.append(ls) d = {'machine':cluster_data[TEST_INDEX]._running_machines, 'capacity': capacity, 'gpu_deploy':cluster_data[TEST_INDEX]._running_inside_machines} df = pd.DataFrame(data=d) df[20:50] # -
philly_analyse.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from LSTM_VGG16.runCNN_LSTM import * # + output_begin = 3 num_outputs = 3 timesteps = 1 # TimeseriesGenerator Handles overlapping learning_rate = 0.0001 in_epochs = 1 out_epochs = 1 train_batch_size = 50 test_batch_size = 50 subjectList = [1, 2] #, 3, 4, 5, 7, 8, 11, 12, 149[i for i in range(1, 9)] + [i for i in range(10, 25)] except [6, 13, 10, ] testSubjects = [3, 4] trainingSubjects = [s for s in trainingSubjects if not s in testSubjects] # - vgg_model, full_model = getFinalModel(timesteps = timesteps, lstm_nodes = lstm_nodes, lstm_dropout = lstm_dropout, lstm_recurrent_dropout = lstm_recurrent_dropout, num_outputs = num_outputs, lr = learning_rate, include_vgg_top = include_vgg_top) full_model = trainCNN_LSTM(full_model, out_epochs, subjectList, timesteps, output_begin, num_outputs, batch_size = train_batch_size, in_epochs = in_epochs) means, results = evaluateCNN_LSTM(full_model, label_rescaling_factor = label_rescaling_factor, testSubjects = testSubjects, timesteps = timesteps, output_begin = output_begin, num_outputs = num_outputs, batch_size = test_batch_size) def drawPlotsForSubj(outputs, subj, subjID, modelID, num_outputs = num_outputs, angles = angles): if num_outputs == 1: angles = ['Yaw'] colors = ['#FFAA00', '#00AA00', '#0000AA', '#AA0000'] title = 'Estimations for the Subject %d (Subject ID: %s, Total length: %d)\nby the Model %s' % (subj, subjID, outputs[0][0].shape[0], modelID) red, blue = (1.0, 0.95, 0.95), (0.95, 0.95, 1.0) f, rows = plt.subplots(num_outputs, 1, sharey=True, sharex=True, figsize=(16, 3*num_outputs)) f.suptitle(title) for i, (matrix, absolute_mean_error) in enumerate(outputs): cell = rows if num_outputs > 1: cell = rows[i] l1 = cell.plot(matrix[:, 0], colors[i], label='Ground-truth') l2 = cell.plot(matrix[:, 1], 
colors[-1], label='Estimation') cell.set_facecolor(red if 'F' in subjID else blue) #cell.set_xlim([0, 1000]) cell.set_ylim([-label_rescaling_factor, label_rescaling_factor]) cell.set_ylabel('%s Angle\nAbsolute Mean Error: %.2f' % (angles[i], absolute_mean_error)) f.subplots_adjust(top=0.93, hspace=0, wspace=0) plt.savefig('foo.png', bbox_inches='tight') def drawResults(outputs, modelID, num_outputs = num_outputs, angles = angles): for subject, outputs in results: drawPlotsForSubj(outputs, subject, BIWI_Subject_IDs[subject], modelID, angles = angles) drawResults(results[:1], 'M000', num_outputs = num_outputs, angles = ['Pitch', 'Yaw', 'Roll']) drawPlotsForSubj(outputs, 9, 'M09', 'M000')
DeepRL_For_HPE/AutomatedCNN-LSTM_runner_v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Trong khoá học này, chúng ta sẽ sử dụng ngôn ngữ lập trình Python cho tất cả các bài tập. Python là một trong những ngôn ngữ lập trình đa năng tuyệt vời và có những thư viện phổ biến như python, scipy, matplotlib... Chính vì vậy mà Python trở thành ngôn ngữ được sử dụng rộng rãi trong giới khoa học máy tính. # # Bài viết này được viết dựa trên nội dung trong [CS228 Python Tuorial](https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb) # # ### Giới Thiệu Về Python # # Python là một ngôn ngữ lập trình bậc cao. Những dòng Code của Python thường được cho là gần giống với mã giả, vì bạn có thể thực hiện những ý tưởng trong một vài dòng Code và rất dễ đọc. # # Ví dụ về thuật toán sắp xếp Quicksort trong Python: # # + def quicksort(arr): if len(arr) <= 1: return arr pivot = arr[len(arr) // 2] left = [x for x in arr if x < pivot] middle = [x for x in arr if x == pivot] right = [x for x in arr if x > pivot] return quicksort(left) + middle + quicksort(right) print(quicksort([3,6,8,10,1,2,1])) # - # Trong bài viết này, chúng ta sẽ tìm hiểu về các nội dung cơ bản sau đây: # 1. Kiểu dữ liệu # 2. Containers # - Lists (Kiểu danh sách) # - Dictionaries (Kiểu từ điển) # - Sets (Kiểu set) # - Tuples (Kiểu Tuple) # 3. Functions (hàm) # 4. Classes (lớp) # **Lưu ý:** trong toàn bộ nội dung của khoá này sẽ sử dụng trên phiên bản 3.x. # # Các bạn có thể kiểm tra phiên bản hiện tại trong Command Line bằng dòng lệnh: `python --version`. Hoặc kiểm tra phiên bản trong Jupyter Notebook bằng dòng lệnh sau: from platform import python_version print(python_version()) # ### 1. Kiểu dữ liệu # # Cũng giống như các ngôn ngữ khác, Python có các kiểu dữ liệu cơ bản bao gồm: integers, floats, booleans, và strings. 
# # #### 1.1 Numbers (Kiểu số) # # Kiểu integers (số nguyên) / floats (số thực) hoạt động giống như với các ngôn ngữ khác. ### Kiểu số nguyên (integers) x = 3 print(type(x)) # Prints "<class 'int'>" print(x) # Prints "3" print(x + 1) # Thực hiện phép cộng; prints "4" print(x - 1) # Thực nhiên phép trừ; prints "2" print(x * 2) # Thực hiện phép nhân; prints "6" print(x ** 2) # Thực hiện phép mũ; prints "9" x += 1 print(x) # Prints "4" x *= 2 print(x) # Prints "8" ### Kiểu số thực (floats) y = 2.5 print(type(y)) # Prints "<class 'float'>" print(y, y + 1, y * 2, y ** 2) # Prints "2.5 3.5 5.0 6.25" # **Chú ý:** không giống như các ngôn ngữ khác, Python không có các phép toán `x++` hay `x--`. # # Tìm hiểu thêm về các phép toán trong Python tại [trang docs chính thức](https://docs.python.org/3.5/library/stdtypes.html#numeric-types-int-float-complex) # #### 1.2 Booleans # # Python thực hiện tất cả các phép toán thông thường trong đại số Boolean. # # Chúng ta có thể sử dụng các từ tiếng Anh thay cho các ký tự `&&`, `||`, v.v... t = True f = False print(type(t)) print(t and f) # Logical AND; prints "False" print(t or f) # Logical OR; prints "True" print(not t) # Logical NOT; prints "False" print(t != f) # Logical XOR; prints "True" # #### 1.3 Strings # Python hỗ trợ rất tuyệt vời cho kiểu chuỗi: hello = 'hello' # Chuỗi ký tự sử dụng dấu nháy ' ' world = "world" # Hoặc sử dụng dấu nháy " " print(hello) # Prints "hello" print(len(hello)) # lấy độ dài của chuỗi; prints "5" hw = hello + ' ' + world # Kết nối chuỗi print(hw) # prints "hello world" hw12 = '%s %s %d' % (hello, world, 12) # định dạng chuỗi kiểu sprintf print(hw12) # prints "hello world 12" # Ngoài ra, còn có rất nhiều hàm hỗ trợ, các bạn có thể xem thêm trong [trang docs chính thức](https://docs.python.org/3.5/library/stdtypes.html#string-methods). 
# # Dưới đây là một số hàm phổ biến: # + s = "hello" # Viết hoa chữ cái đầu tiên print(s.capitalize()) # Viết hoa toàn bộ chữ cái print(s.upper()) # căn giữa print(s.center(30)) # loại bỏ dấu cách thừa print(' world '.strip()) # - # ### 2. Containers # # Trong Python có các kiểu chứa các dữ liệu như: Lists, Dictionaries, Sets, Tuples. # #### 2.1 Lists # # List trong Python là một dạng dữ liệu cho phép lưu trữ nhiều kiểu dữ liệu khác nhau, và chúng ta có thể thay đổi kích thước của nó: xs = [3, 1, 2] # Tạo một list print(xs, xs[2]) # Prints "[3, 1, 2] 2" print(xs[-1]) # Các số âm thể hiện việc truy cập ngược từ cuối; prints "2" xs[2] = 'foo' # List có thể chứa các phần tử khác nhau print(xs) # Prints "[3, 1, 'foo']" xs.append('bar') # Thêm phần tử mới vào list print(xs) # Prints "[3, 1, 'foo', 'bar']" x = xs.pop() # Xoá phần tử khỏi list print(x, xs) # Prints "bar [3, 1, 'foo']" # Đọc thêm về List: [Python List](https://docs.python.org/3.5/tutorial/datastructures.html#more-on-lists) # **Slicing (cắt List):** ngoài việc truy cập từng phần tử trong List, Python cung cấp cú pháp ngắn gọn để truy cập nhiều phần tử (sublists). # # **Chú ý:** việc cắt List này không ảnh hưởng đến List ban đầu. nums = list(range(5)) # tạo một list từ 0 đến 4 print(nums) # Prints "[0, 1, 2, 3, 4]" print(nums[2:4]) # lấy phần tử có vị trí 2 đến 4 (exclusive); prints "[2, 3]" print(nums[2:]) # lấy phần tử có vị trí 2 đến hết; prints "[2, 3, 4]" print(nums[:2]) # lấy phần tử từ đầu đến phần tử có vị trí 2 (exclusive); prints "[0, 1]" print(nums[:]) # lấy toàn bộ list; prints "[0, 1, 2, 3, 4]" print(nums[:-1]) # chỉ số có thể là số âm; prints "[0, 1, 2, 3]" nums[2:4] = [8, 9] # thay đổi nhiều phần tử cùng lúc print(nums) # Prints "[0, 1, 8, 9, 4]" # **Vòng lặp:** Bạn cũng có thể truy cập các phần tử của List bằng vòng lặp. 
animals = ['cat', 'dog', 'monkey'] for animal in animals: print(animal) # Nếu bạn muốn truy cập vào chỉ mục của từng phần tử của List, sử dụng hàm `enumerate`: animals = ['cat', 'dog', 'monkey'] for idx, animal in enumerate(animals): print('#%d: %s' % (idx + 1, animal)) # **List comprehensions:** trong quá trình lập trình, chúng ta thường xuyên muốn thay đổi giá trị của các phần tử đồng loạt. # # Ví dụ: nhân giá trị của các phần tử bên trong List với 2. # Chúng ta không thực hiện được nhân với 2 theo cách sau. Điều này chỉ nhân số lượng thêm 2 lần. nums = [0, 1, 2, 3, 4] nums * 2 # Thay vào đó, chúng ta có thể sử dụng vòng lặp: nums = [0, 1, 2, 3, 4] squares = [] for x in nums: squares.append(x ** 2) print(squares) # Chúng ta có thể làm cách trên đơn giản hơn bằng List Comprehensions: nums = [0, 1, 2, 3, 4] squares = [x ** 2 for x in nums] print(squares) # Chúng ta có thể sử dụng thêm các điều kiện khi dùng List Comprehension: nums = [0, 1, 2, 3, 4] even_squares = [x ** 2 for x in nums if x % 2 == 0] print(even_squares) # #### 2.2 Dictionaries # # Dictionary trong Python là một tập hợp các cặp (key, value) không có thứ tự. Nó là một container mà chứa dữ liệu, được bao quanh bởi các dấu ngoặc móc đơn {}. # # Đọc thêm về [Python Dictionaries](https://docs.python.org/3.5/library/stdtypes.html#dict) d = {'cat': 'cute', 'dog': 'furry'} # tạo một Dict print(d['cat']) # Lấy giá trị có key = cat; prints "cute" print('cat' in d) # kiểm tra cat có trong dict ? d['fish'] = 'wet' # thêm một cặp key, value trong Dicts print(d['fish']) # Prints "wet" # Sẽ bị lỗi nếu `key` không tồn tại trong Dict print(d['monkey']) # Thay vào đó chúng ta sẽ sử dụng lệnh `get` print(d.get('monkey', 'N/A')) # Lấy giá trị có key = 'monkey' không có in ra 'N/A'; prints "N/A" print(d.get('fish', 'N/A')) # Lấy giá trị có key = 'fish'không có in ra 'N/A'; prints "wet" # **Vòng lặp:** chúng ta có thể dễ dàng in các giá trị trong Dict bằng vòng lặp. 
d = {'person': 2, 'cat': 4, 'spider': 8} for animal in d: legs = d[animal] print('A %s has %d legs' % (animal, legs)) # **Dictionary comprehensions:** Cũng giống như List Comprehensions. nums = [0, 1, 2, 3, 4] even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0} print(even_num_to_square) # #### 2.3 Sets # # Set là một kiểu chứa dữ liệu đặc biệt, nó chỉ chứa các giá trị riêng biệt không trùng lặp nhau. # # # animals = {'cat', 'dog'} print('cat' in animals) # kiểm tra phần tử trong set; prints "True" print('fish' in animals) # prints "False" animals.add('fish') # thêm phần tử vào set print('fish' in animals) # Prints "True" print(len(animals)) # số lượng bên trong set; prints "3" print(animals) animals.add('cat') # thêm phần tử vào set, nhưng `cat` đã có nên set không thay đổi print(len(animals)) # Prints "3" print(animals) animals.remove('cat') # xoá phần tử trong set print(len(animals)) # Prints "2" print(animals) # **Vòng lặp:** Bạn cũng có thể truy cập các phần tử của Set bằng vòng lặp. animals = {'cat', 'dog', 'fish'} for idx, animal in enumerate(animals): print('#%d: %s' % (idx + 1, animal)) # **Set comprehensions:** giống như Lists và Dictionaries, chúng ta cũng có thể tạo Set bằng phương thức Set Comprehensions: from math import sqrt nums = {int(sqrt(x)) for x in range(30)} print(nums) # #### 2.4 Tuples # # Tuple trong Python là một kiểu dữ liệu dùng để lưu trữ các đối tượng không thay đổi về sau (giống như hằng số). # + day = ('monday', 'tuesday', 'wednesday', 'thursday') print(type(day)) # Lấy kiểu dữ liệu print(day) # in toàn bộ tuples print(day[1]) # truy cập vào phần tử index = 1 # - # Thực hiện thay đổi giá trị Tuples sẽ bị lỗi day[1] = 2 # ### 3. Functions # # Hàm trong Python được định nghĩa bằng từ khoá `def`. 
Ví dụ: # # # + def sign(x): if x > 0: return 'positive' elif x < 0: return 'negative' else: return 'zero' for x in [-1, 0, 1]: print(sign(x)) # - # Chúng ta có thể thêm đối số trong hàm như sau: # + def hello(name, loud=False): if loud: print('HELLO, %s!' % name.upper()) else: print('Hello, %s' % name) hello('Bob') # Prints "Hello, Bob" hello('Fred', loud=True) # Prints "HELLO, FRED!" # - # Xem thêm về [Defining Functions](https://docs.python.org/3.5/tutorial/controlflow.html#defining-functions) # ### 4. Classes # Cú pháp để sử dụng Class trong Python rất đơn giản: # + class Greeter(object): # Constructor def __init__(self, name): self.name = name # Cho giá trị của name vào self.name # Instance method def greet(self, loud=False): if loud: print('HELLO, %s!' % self.name.upper()) else: print('Hello, %s' % self.name) g = Greeter('Fred') # Khởi tạo Class g với đầu vào `Fred` g.greet() # Gọi hàm greet() khi loud = False; prints "Hello, Fred" g.greet(loud=True) # Gọi hàm greet() khi loud = True; prints "HELLO, FRED!" # - # Xem thêm về [Python Classes](https://docs.python.org/3.5/tutorial/classes.html) # ### Bảng tổng kết # # Bảng tổng kết các kiến thức cơ bản trong Python [tải xuống tại đây](https://github.com/thanhhff/AIVN-Machine-Learning/blob/master/Python/Python3_reference_cheat_sheet.pdf) # # <img src="https://imgur.com/1hMqrPZ.png"> # ### Tổng kết # # Thông qua bài này, các bạn đã nắm một số kiến thức cơ bản trong lập trình Python. Những nội dung bên trên có thể hơi nhanh với một số bạn mới bắt đầu học Python. Tuy nhiên, những nội dung này khá quan trọng trong và dùng nhiều để Code trong Machine Learning. Các bạn hãy chú ý học kỹ và làm bài tập đầy đủ !
Week 1/Python-Basic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="al7XUeL4cMtu" # Mount Google drive # + id="Oq0QQFwqWIGf" from google.colab import drive drive.mount('/content/drive/') # + [markdown] id="kaI-x89ncQPo" # Install Tensorflow Model # + id="pqz3tl_0XyZT" import os import pathlib # Clone the tensorflow models repository if it doesn't already exist if "models" in pathlib.Path.cwd().parts: while "models" in pathlib.Path.cwd().parts: os.chdir('..') elif not pathlib.Path('models').exists(): # !git clone --depth 1 https://github.com/tensorflow/models # + [markdown] id="cwKjWM-zcV3I" # Install Object Detection API # + id="oKsfEFuHX6kM" language="bash" # cd models/research/ # protoc object_detection/protos/*.proto --python_out=. # cp object_detection/packages/tf2/setup.py . # python -m pip install . # + [markdown] id="lZGxQHqPccin" # Test Model # + id="OhlSYnM1YZY9" # !python models/research/object_detection/builders/model_builder_tf2_test.py # + [markdown] id="l1_-YLD6cgeO" # Prepare tfrecord - train # + id="sfcqjy4DYpXJ" # !python 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/my_create_pascal_tf_record.py' \ # --label_map_path='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/config/my_pascal_label_map.pbtxt' \ # --data_dir='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit' \ # --year=VOC2012 \ # --set=train \ # --output_path='drive/MyDrive/Colab Notebooks/tf2_captcha_test/pascal_train.record' # + id="P-HgBpVSnQOW" # !python 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/my_create_pascal_tf_record.py' \ # --label_map_path='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/config/my_pascal_label_map.pbtxt' \ # --data_dir='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit' \ # --year=VOC2012 \ # --set=val \ # --output_path='drive/MyDrive/Colab 
Notebooks/tf2_captcha_test/pascal_val.record' # + [markdown] id="CwMCAvvottQf" # Start Tensorboard for monitoring # + id="qVdPwk79tqGP" # %load_ext tensorboard # %tensorboard --logdir 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/train/train' # + [markdown] id="0mjpf20Vss6E" # Train Model # + id="Zx6BXRuAr165" # !python models/research/object_detection/model_main_tf2.py \ # --pipeline_config_path='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/config/ssd_mobilenet_v2_320x320_coco17_tpu-8-colab.config' \ # --model_dir='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/train' \ # --alsologtostderr # + [markdown] id="br8qz-HBuWT_" # Export Model. You will get your model in ../exported_model/saved_model path. # + id="zGFXksHFuRRE" # !python models/research/object_detection/exporter_main_v2.py \ # --input_type=image_tensor \ # --pipeline_config_path='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/config/ssd_mobilenet_v2_320x320_coco17_tpu-8-colab.config' \ # --trained_checkpoint_dir='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/train' \ # --output_directory='drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/exported_model' # + [markdown] id="BjK3-Se4F3WP" # Load model and setting. 
# + id="lzBNz7GxFmyS" import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) import pathlib import tensorflow as tf import time from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as viz_utils tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2) # Enable GPU dynamic memory allocation gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) def download_images(): base_dir = 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/test_img/' filenames = ['captcha16.png', 'captcha17.png','captcha18.png'] #filenames = ['captcha241.png'] image_paths = [] for filename in filenames: image_path = base_dir + filename image_path = pathlib.Path(image_path) image_paths.append(str(image_path)) return image_paths IMAGE_PATHS = download_images() print(IMAGE_PATHS) PATH_TO_MODEL_DIR = 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/exported_model' print(PATH_TO_MODEL_DIR) PATH_TO_LABELS = 'drive/MyDrive/Colab Notebooks/tf2_captcha_test/VOCdevkit/config/my_pascal_label_map.pbtxt' print(PATH_TO_LABELS) PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model" print('Loading model...', end='') start_time = time.time() # Load saved model and build the detection function detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL) end_time = time.time() elapsed_time = end_time - start_time print('Done! Took {} seconds'.format(elapsed_time)) category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True) # + [markdown] id="sTlbZsjLGhIL" # Run your model. 
# + id="rcxQ-_7YGjf9" # %matplotlib inline import numpy as np from PIL import Image import matplotlib.pyplot as plt #import warnings #warnings.filterwarnings('ignore') # Suppress Matplotlib warnings def load_image_into_numpy_array(path): return np.array(Image.open(path)) for image_path in IMAGE_PATHS: print('Running inference for {}... '.format(image_path), end='') image_np = load_image_into_numpy_array(image_path) input_tensor = tf.convert_to_tensor(image_np) input_tensor = input_tensor[tf.newaxis, ...] detections = detect_fn(input_tensor) num_detections = int(detections.pop('num_detections')) detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()} detections['num_detections'] = num_detections detections['detection_classes'] = detections['detection_classes'].astype(np.int64) image_np_with_detections = image_np.copy() viz_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'], detections['detection_classes'], detections['detection_scores'], category_index, use_normalized_coordinates=True, max_boxes_to_draw=10, min_score_thresh=.50, agnostic_mode=False) plt.figure() plt.imshow(image_np_with_detections) print('Done') plt.show() print('Draw images done')
tf2_captcha_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DARPA-ARC Notebook 4: fMRI Second Levels # ## Precompute Permutations # Based on intial calculations, we assume one full loop of WLS + TFCE will take ~17s. We will submit jobs of 100 iterations (approx. 30 minutes time on cluster). # + from my_settings import os, op, np, root_dir, version, n_subj, n_permutations, inc np.random.seed(47404) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Generate permutations. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# permutations = [] while True: arr = np.random.choice([1,-1],n_subj,replace=True) if not np.any(np.apply_along_axis(np.array_equal, 0, permutations, arr)): permutations.append(arr) if len(permutations) >= n_permutations: break #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Save. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# permutations = np.array(permutations) index = np.arange(0,n_permutations+1,inc) for n, ix in enumerate(index[1:]): np.save(op.join(root_dir, 'fmri_second_levels', 'permutations', '%s_sign_flips_%s' % (version, (n+1))), permutations[ix-inc:ix]) with open(op.join(op.join(root_dir, 'fmri_second_levels', '%s_permutations.txt' % version)), 'w') as f: f.write('\n'.join(['%i' % i for i in np.arange(n_permutations/inc+1)])) print('Done.') # - # ## Make Surface Masks # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, subj_dir, label_dir, rois, task) from mne import read_label, read_surface, spatial_tris_connectivity, set_log_level set_log_level(verbose=False) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Make labels. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for hemi in ['lh', 'rh']: # ## Assemble and merge labels. 
label = [] for roi in rois: label.append(read_label(op.join(label_dir,'%s-%s.label' % (roi, hemi)))) label = np.sum(label) # ## Save label. label.name = '%s-%s' % (task, hemi) label.save(op.join(root_dir, 'fmri_second_levels/%s-%s.label' % (task, hemi))) # ## Load surface. _, tris = read_surface(op.join(subj_dir, 'surf', '%s.white' % hemi)) mapping = np.in1d(np.unique(tris),label.vertices) # ## Reduce triangles to those in label. ix = np.all(np.apply_along_axis(np.in1d, 0, tris, label.vertices), axis=1) tris = tris[ix] # ## Compute connectivity. coo = spatial_tris_connectivity(tris, remap_vertices=True) np.savez(op.join(root_dir, 'fmri_second_levels/%s_%s_connectivity' % (version, hemi)), data = coo.data, row = coo.row, col = coo.col, shape = coo.shape, mapping=mapping, vertices=label.vertices) print('Done.') # - # ## Make Volume Mask # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, subj_dir, roi_dict, asegf) from scipy.sparse import coo_matrix lut = '/usr/local/freesurfer/stable5_3_0/FreeSurferColorLUT.txt' #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Create mask. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## Load aseg. aseg = nib.load(asegf).get_data() ## Find all voxels in ROI list. Get corresponding labels. mapping = np.in1d(aseg, list(roi_dict.keys())).reshape(aseg.shape) voxels = np.where(mapping) names = np.array([roi_dict[i] for i in aseg[voxels]]) voxels = np.vstack(voxels).T ## Initialize connectivity matrix. n_voxels, _ = voxels.shape coo = np.zeros([n_voxels,n_voxels], dtype=int) ## Iteratively test for adjacency. ## Here we use 6-lattice connectivity (up,down,forward,backward,left,right). for n in range(n_voxels): diff = np.linalg.norm(voxels - voxels[n], axis=1) M, = np.where(diff==1.) for m in M: coo[n,m] = 1 coo = coo_matrix(coo) ## Save. 
np.savez(op.join(root_dir, 'fmri_second_levels/%s_mni305_connectivity' % version), data = coo.data, row = coo.row, col = coo.col, shape = coo.shape, mapping=mapping, voxels=voxels, names=names) print('Done.') # - # ## Extract Mean Signal from ROIs # Necessary for computing percent signal change down the line. # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, task, session) ## Define TR onsets. tr_onsets = np.insert( np.cumsum( np.ones(n_acq - 1) * tr ), 0, 0 ) mean_signal = dict() for space in ['lh','rh','mni305']: # print(space) # ## Load masks. npz = np.load(op.join(root_dir,'fmri_second_levels/%s_%s_connectivity.npz' % (version, space))) include = npz['mapping'] # ## Preallocate space. ms = np.zeros([len(subjects), include.sum()]) # ## Iterate over subjects. for n, subject in enumerate(subjects): # ## Load data. subj_dir = op.join(root_dir, 'fmri_first_levels', subject, '%s_%03d' % (task, session), '%03d' % session) if space == 'mni305': f = op.join(subj_dir,'fmcpr.sm%s.%s.2mm.b0dc.nii.gz' % (sm, space)) else: f = op.join(subj_dir,'fmcpr.sm%s.fsaverage.%s.b0dc.nii.gz' % (sm, space)) data = nib.load(f).get_data() # ## Censor data. Average across acquisitions. try: censor = np.loadtxt(op.join(subj_dir, '%s.censor.%s.par' % (version, fd))) except IOError: censor = [] censor = np.invert(np.in1d(tr_onsets, censor)) # data = data[include,...].squeeze() data = data[...,censor].mean(axis=1) # ## Append. ms[n] = data # ## Store in dictionary. mean_signal[space] = ms ## Save. 
f = op.join(root_dir, 'fmri_second_levels/%s_mean_signal' % version) np.savez_compressed(f, lh = mean_signal['lh'], rh = mean_signal['rh'], mni305 = mean_signal['mni305']) print('Done.') # - # ## Assemble Data # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, spaces, models, task, models, conditions_dict) from mne import read_label #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for model_name, analysis, epochs_type in models: # for space in spaces: # ## Load masks. npz = np.load(op.join(root_dir, 'fmri_second_levels/%s_%s_connectivity.npz' % (version, space))) include = npz['mapping'] # results_dir = op.join(concat_sess_dir, ('%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space))) # for condition in ['Control'] + conditions_dict[analysis]: # print(model_name, analysis, epochs_type, space, condition) # condition_dir = op.join(results_dir, ('%s.%s.%s.%s.%s.par' % (version, model_name, analysis, epochs_type, condition))) # ## Make save directory. out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) if not op.isdir(out_dir): os.makedirs(out_dir) # ## Load data. ces = nib.load(op.join(condition_dir, 'ces.nii.gz')).get_data().squeeze() cesvar = nib.load(op.join(condition_dir, 'cesvar.nii.gz')).get_data().squeeze() affine = nib.load(op.join(condition_dir, 'ces.nii.gz')).affine # ## Masking. ces = ces[include,...] cesvar = cesvar[include,...] # ## Save. 
np.savez_compressed(op.join(out_dir, 'first_levels'), ces=ces.squeeze(), cesvar=cesvar.squeeze()) np.save(op.join(out_dir, 'affine'), affine) print('Done.') # - # ## Perform WLS Permutations # # This is done on on a cluster or in parallel using the fmri_second_levels/wls_perm.csh and fmri_second_levels/wls_perm.py scripts # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, task, n_subj, X, n_subj, n_pred, prepare_image, load_sparse_coo, wls, spaces, models, conditions_dict) from mne.stats.cluster_level import _find_clusters as find_clusters #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Define parameters. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## I/O parameters. space = spaces[-1] model_name, analysis, epochs_type = models[0] # print(model_name, analysis, epochs_type, space) # ## Permutation parameters. permutations = 0 ''' from subprocess import call # DEBUGGING regressor = '.'.join([version, model_name, analysis, epochs_type, conditions_dict[analysis][-1], 'par']) args = [space, regressor, permutations] call(['python fmri_second_levels/wls_perm.py %s %s %s' % (space, regressor, permutations)], env=os.environ, shell=True) ''' ## TFCE parameters. threshold = dict(start=0.1, step=0.1, h_power=2, e_power=0.5) tail = 0 max_step = 1 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Load and prepare data. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) ## Load data. npz = np.load(os.path.join(out_dir, 'first_levels.npz')) ces = npz['ces'] cesvar = np.abs( 1. / npz['cesvar'] ) ## Define indices. 
connectivity = load_sparse_coo(os.path.join(root_dir, 'fmri_second_levels', '%s_%s_connectivity.npz' % (version, space))) index, = np.where(~np.isinf(cesvar).sum(axis=1).astype(bool)) include = ~np.isinf(cesvar).sum(axis=1).astype(bool) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Setup for permutation testing. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## If specified, load precomputed sign flips. if permutations: sign_flips = np.load(os.path.join(root_dir, 'fmri_second_levels', 'permutations', '%s_sign_flips_%s.npy' % (version, permutations))) else: sign_flips = np.ones((1,n_subj)) n_shuffles = sign_flips.shape[0] ## Preallocate arrays for results. shape = [n_shuffles] + list(ces.shape[:-1]) Bmap = np.zeros(shape) Fmap = np.zeros(shape) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## Loop it! for n, sf in enumerate(sign_flips): # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Compute statistics. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # for m in index: # ## Update variables. W = np.diag(cesvar[m]) Y = ces[m] # ## Permute values. ## See Winkler et al. (2014), pg. 385 ## To compute Hat Matrix, see: https://en.wikipedia.org/wiki/Projection_matrix and Z = X[:,1:] ZZ = Z.dot( np.linalg.inv( Z.T.dot(W).dot(Z) ) ).dot(Z.T).dot(W) Rz = np.identity(n_subj) - ZZ Y = np.diag(sf).dot(Rz).dot(Y) # ## Perform WLS. Bmap[n,m], Fmap[n,m] = wls(X,Y,W) # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Perform TFCE. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # _, Fmap[n] = find_clusters(Fmap[n], threshold, tail=tail, connectivity=connectivity, include=include, max_step=max_step, show_info=False) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Save results. 
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# if permutations: f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_perm-%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition, permutations))) else: f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_obs' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) np.savez_compressed(f, Bmap=Bmap, Fmap=Fmap) print('Done.') # - # ## Perform FWE Corrections # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, spaces, task, prepare_image, models, conditions_dict, n_permutations, inc) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# permutations = np.arange(int(n_permutations/inc)) + 1 for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # for space in spaces: # print(model_name, analysis, epochs_type, condition, space) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) obs_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_obs.npz' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) # ## Load true effects. npz = np.load(obs_f) Bmap = npz['Bmap'].squeeze() Fmap = npz['Fmap'].squeeze() # ## Load permutations. perm_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition)) + '_perm-%s.npz') Pmap = [] for p in permutations: try: npz = np.load(perm_f % p) Pmap.append(npz['Fmap']) except Exception as e: print(e, p) # I was getting bad magic numbers on some file and had to rerun WLS Pmap = np.concatenate(Pmap, axis=0) n_permutations, _ = Pmap.shape # ## Compute p-values via FWE. 
p_values = np.ones_like(Fmap) for mp in Pmap.max(axis=1): p_values += mp > Fmap p_values /= n_permutations + 1. p_values = -np.log10(p_values) * np.sign(Bmap) # ## Save maps. out_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_fwe' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) np.save(out_f, p_values) for arr, name in zip([Bmap,Fmap,p_values],['beta', 'F', 'fwe']): image = prepare_image(arr, space) image = nib.Nifti1Image(image, np.load(op.join(out_dir, 'affine.npy'))) nib.save(image, op.join(out_dir, '%s.nii.gz' % name)) print('Done.') # - # ## Perform FDR Corrections # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, prepare_image, conditions_dict, n_permutations, inc) from mne.stats import fdr_correction #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# permutations = np.arange(int(n_permutations/inc)) + 1 for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # FDR, signs = [], [] # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Compute p-values within spaces. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # for n, space in enumerate(spaces): # print(model_name, analysis, epochs_type, condition, space) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) obs_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_obs.npz' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) # ## Load true effects. npz = np.load(obs_f) Bmap = npz['Bmap'].squeeze() Fmap = npz['Fmap'].squeeze() # ## Load permutations. 
perm_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition)) + '_perm-%s.npz') Pmap = [] for p in permutations: try: npz = np.load(perm_f % p) Pmap.append(npz['Fmap']) except Exception as e: print(e, p) # I was getting bad magic numbers on some file and had to rerun WLS Pmap = np.concatenate(Pmap, axis=0) n_permutations, _ = Pmap.shape # ## Compute p-values via FWE. p_values = (Pmap >= Fmap).sum(axis=0) + 1. p_values /= n_permutations + 1. FDR.append(p_values) signs.append(np.sign(Bmap)) # '''## Save maps. for arr, name in zip([Bmap, Fmap],['beta', 'F']): image = prepare_image(arr, space) image = nib.Nifti1Image(image, np.load(op.join(out_dir, 'affine.npy'))) nib.save(image, op.join(out_dir, '%s.nii.gz' % name))''' # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Perform FDR corrections. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ## Assemble info. indices = np.concatenate([np.ones_like(arr) * n for n, arr in enumerate(FDR)]) FDR = np.concatenate(FDR) signs = np.concatenate(signs) # ## Perform FDR correction. FDR[np.where(signs)] = fdr_correction(FDR[np.where(signs)])[-1] FDR = -np.log10(FDR) * signs # ## Save maps. 
for n, space in enumerate(spaces): out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) out_f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_fdr' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) np.save(out_f, FDR[indices == n]) image = prepare_image(FDR[indices == n], space) image = nib.Nifti1Image(image, np.load(op.join(out_dir, 'affine.npy'))) nib.save(image, op.join(out_dir, 'fdr.nii.gz')) print('Done.') # - # # Section 5: Visualization # ## Threshold Second-Level Maps # Thresholding clusters such that: # * p < 0.05 (FWE corrected, alpha = 0.05) # * Surface: clusters > 100mm2 # * Volume: clusters > 20 contiguous voxels # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, prepare_image, fs_dir, load_sparse_coo, conditions_dict, psc_threshold) from mne.stats.cluster_level import _find_clusters as find_clusters #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Define parameters. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## Thresholding parameters. correction = 'fdr' threshold = -np.log10( psc_threshold ) min_cluster = dict(lh = 100, rh = 100, mni305 = 20) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for space in spaces: # ## Load connectivity information. connectivity = load_sparse_coo(op.join(root_dir, 'fmri_second_levels/%s_%s_connectivity.npz' % (version, space))) # ## Load mapping information. 
npz = np.load(op.join(root_dir, 'fmri_second_levels/%s_%s_connectivity.npz' % (version, space))) # if space != 'mni305': vertices = npz['vertices'] average_area = nib.load(op.join(fs_dir, 'fsaverage', 'surf', '%s.white.avg.area.mgh' % space)).get_data() average_area = average_area[vertices].squeeze() # for model_name, analysis, epochs_type in models: # print(space, model_name, analysis, epochs_type) # for condition in ['Control'] + conditions_dict[analysis]: # ## Load corrected p-values. out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) f = op.join(out_dir, ('%s.%s.%s.%s.%s.%s.%s.%s.%s_%s.npy' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition, correction))) pmap = np.load(f) # ## Find clusters. include = np.where(pmap, True, False) clusters, sums = find_clusters(pmap, threshold, tail=0, connectivity=connectivity, include=include, t_power=0) # ## Compute areas. if space == 'mni305': cluster_sums = sums else: cluster_sums = np.array([average_area[c].sum() for c in clusters]) # ## Threshold. try: survival_ix = np.concatenate([c for c, s in zip(clusters, cluster_sums) if s > min_cluster[space]]) pmap[~np.in1d(np.arange(pmap.shape[0]), survival_ix)] = 0 except ValueError: #print('No clusters', space, model_name, analysis, epochs_type, condition) pmap = np.zeros_like(pmap) pmap[0] = 1; pmap[-1] = 1 # pysurfer bug: https://github.com/nipy/PySurfer/issues/267 # ## Save. 
image = prepare_image(pmap, space) image = nib.Nifti1Image(image, np.load(op.join(op.dirname(f), 'affine.npy'))) nib.save(image, op.join(op.dirname(f), '%s_thresh_%s.nii.gz' % (correction, psc_threshold))) print('Done.') # - # ## Compute Percent Signal Change # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, session, prepare_image, fs_dir, load_sparse_coo, psc_threshold, subjects, conditions_dict, plt) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main Loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# correction = 'fdr' ## Load average signal. mean_signal = np.load(op.join(root_dir, 'fmri_second_levels/%s_mean_signal.npz' % version)) for space in spaces: # ## Assemble design matrices. subj_dir = op.join(root_dir, 'fmri_first_levels', '%s', '%s_%03d' % (task, session)) for model_name, analysis, epochs_type in models: # print(space, model_name, analysis, epochs_type) # X_f = op.join(subj_dir, ('%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space)), 'X.dat') scale_factors = np.array([np.loadtxt(X_f % subject).max(axis=0)[:(len(conditions_dict[analysis])+1)] for subject in subjects]).T # for n, condition in enumerate(['Control'] + conditions_dict[analysis]): # ## Load first levels. out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) ces = np.load(op.join(out_dir, 'first_levels.npz'))['ces'] # ## Compute PSC (Pernet 2014, Frontiers in Neuroscience). ms = np.where(mean_signal[space], mean_signal[space], np.inf).T psc = np.divide(ces * scale_factors[n] * 100., ms) psc = prepare_image(psc.mean(axis=1), space) # ## Mask image. pmap = nib.load(op.join(out_dir, '%s_thresh_%s.nii.gz' % (correction, psc_threshold))).get_data() psc *= np.where(pmap, 1, 0) # ## Save. 
image = nib.Nifti1Image(psc, np.load(op.join(out_dir, 'affine.npy'))) nib.save(image, op.join(out_dir, '%s_psc.nii.gz' % correction)) print('Done.') # - # ## Surface Plots # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, prepare_image, fs_dir, load_sparse_coo, subjects, img_dir, overlay, surface, conditions_dict, psc_threshold) from surfer import Brain correction = 'fwe' #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Plot. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for hemi in ['lh', 'rh']: # for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # print(hemi, model_name, analysis, epochs_type, condition) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, hemi, condition))) for view in ['lateral', 'medial']: fn = op.join(out_dir, '%s_%s.nii.gz' % (correction, overlay)) # brain = Brain('fsaverage', hemi, surface, subjects_dir=fs_dir) brain.add_overlay(fn, min=0.001, max=0.1, sign="pos") brain.show_view(view=view) od = op.join(img_dir, overlay, surface, ('%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd))) if not op.isdir(od): os.makedirs(od) out_f = op.join(od, '%s.%s.%s.%s.png' % (correction, condition, hemi, view)) Brain.save_image(brain, out_f) # - # ## Compute surface summary table # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, prepare_image, fs_dir, load_sparse_coo, subjects, img_dir, overlay, surface, psc_threshold, conditions_dict, label_dir, rois) from mne import Label, read_label, grow_labels, vertex_to_mni, set_log_level set_log_level(verbose=False) threshold = -np.log10( psc_threshold ) correction = 'fwe' ## ROI 
parameters. extent = 10 #mm grow = False #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # for hemi in ['lh', 'rh']: # print(model_name, analysis, epochs_type, condition, hemi) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, hemi, condition))) #label_dir = op.join(root_dir, 'fmri_second_levels/labels/', 'seeds_%s' % task) #labels = sorted([f for f in os.listdir(label_dir) if not f.startswith('fig') and f.endswith('label')]) # ## Load accompanying overlay. f = op.join(out_dir, '%s_psc.nii.gz' % correction) overlay = nib.load(f).get_data().squeeze() # with open(op.join(out_dir, ('surface_mni2.%s.%s.%s.%s.%s.%s.%s.%s.%s.csv' % (version, task, model_name, analysis, epochs_type, sm, fd, hemi, condition))), 'w') as fmni: fmni.write(','.join(['Label', 'V', 'X', 'Y', 'Z', 'PSC', 'F', 'p']) + '\n') # for roi in rois: # label = read_label(op.join(label_dir, '%s-%s.label' % (roi, hemi))) # ## Find maximum vertex. ix = np.argmax(overlay[label.vertices]) v = label.vertices[ix] # ## Extract MNI coordinates. x, y, z = vertex_to_mni(v, 0 if hemi == 'lh' else 1, 'fsaverage', fs_dir)[0] # ## Extract PSC, F-scores, p-values. f = op.join(out_dir, 'psc.nii.gz') psc = nib.load(f).get_data().squeeze()[v] # f = op.join(out_dir, 'F.nii.gz') F = nib.load(f).get_data().squeeze()[v] # f = op.join(out_dir, 'fwe_thresh_%.3f.nii.gz' % threshold) p = nib.load(f).get_data().squeeze()[v] # ## Write information. fmni.write('%s-%s,%s,%.0f,%.0f,%.0f,%.2f,%.2f,%.6f\n' % (roi, hemi, v, x, y, z, psc, F, 10.**-p)) # if grow: ## Grow label. 
label = grow_labels('fsaverage', v, extent, 0 if hemi=='lh' else 1, subjects_dir=fs_dir, names='fig_%s-%s' % (roi, hemi), surface='pial')[0] # ## Ensure label is within actiation. Save. ix = np.in1d(label.vertices, np.where(overlay)[0]) label.pos = label.pos[ix] label.values = label.values[ix] label.vertices = label.vertices[ix] label.save('%s/%s.label' % (out_label_dir, label.name)) print('Done.') # - # ## Compute volume summary table # + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, concat_sess_dir, thresholds, models, spaces, task, prepare_image, fs_dir, load_sparse_coo, subjects, img_dir, conditions_dict, psc_threshold, label_dir) from nibabel.affines import apply_affine space = 'mni305' threshold = -np.log10( psc_threshold ) correction = 'fwe' ## ROI parameters. extent = 6 #mm #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # print(model_name, analysis, epochs_type, condition) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) ## Initialize statistics file. with open(op.join(out_dir, ('volume_mni2.%s.%s.%s.%s.%s.%s.%s.%s.%s.csv' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))), 'w') as fmni: fmni.write(','.join(['Label','cX','cY','cZ','X','Y','Z','PSC','F','p']) + '\n') # ## Load data. 
npz = np.load(op.join(root_dir, 'fmri_second_levels/%s_mni305_connectivity.npz' % version)) affine = np.load(op.join(out_dir, 'affine.npy')) obj = nib.load(op.join(out_dir, '%s_psc.nii.gz' % correction)) # overlay = obj.get_data().squeeze() Fval = nib.load(op.join(out_dir, 'F.nii.gz')).get_data().squeeze() pval = nib.load(op.join(out_dir, 'fwe_thresh_%.3f.nii.gz' % threshold)).get_data().squeeze() # rois = ['Left-Caudate', 'Left-Putamen', 'Left-Hippocampus', 'Right-Caudate', 'Right-Putamen', 'Right-Hippocampus'] for roi in rois: # ## Extract activated voxels in ROI. voxels = npz['voxels'][npz['names'] == roi] voxels = voxels[np.where(overlay[tuple([arr for arr in voxels.T])])] if voxels.shape[0] == 0: continue # ## Find maximally activated voxel. ix = np.argmax(overlay[tuple([arr for arr in voxels.T])]) center = voxels[ix] i,j,k = center # ## Get MNI coordinates. x,y,z = apply_affine(affine, center) # ## Extract max values. psc = overlay[i,j,k] F = Fval[i,j,k] p = pval[i,j,k] # ## Write to file. fmni.write('%s,%.0d,%.0d,%.0d,%.0d,%.2d,%.2d,%.2f,%.2f,%.6f\n' % (roi, i, j, k, x, y, z, psc, F, 10.**-p)) # ## Create sphere: find all voxels within extent. dist = [np.linalg.norm( np.diff( apply_affine(affine,np.vstack([center,v])), axis=0 ) ) for v in voxels] ix = np.where(np.array(dist)<=extent) sphere = voxels[ix] # ## Save. #hemi, roi = roi.split('-') #if hemi.startswith('L'): name = '%s-lh' %roi.lower() #else: name = '%s-rh' %roi.lower() #np.save(op.join(out_dir, name), sphere) print('Done.') # - # ## Post-hoc F-statistic Fix # Sam realized very late in the game he should have been saving out the pre-TFCE F-statistics. Fortunately these can be recomputed using the WLS code sans TFCE. 
# + from my_settings import (os, op, np, root_dir, version, n_subj, nib, sm, fd, tr, n_acq, subjects, subj_dir, read_csv, concat_sess_dir, thresholds, models, spaces, task, prepare_image, fs_dir, load_sparse_coo, subjects, img_dir, wls, X, n_pred, conditions_dict, label_dir) for model_name, analysis, epochs_type in models: # for condition in ['Control'] + conditions_dict[analysis]: # for space in spaces: # print(model_name, analysis, epochs_type, condition, space) # out_dir = op.join(root_dir, 'fmri_second_levels', ('%s.%s.%s.%s.%s.%s.%s.%s.%s' % (version, task, model_name, analysis, epochs_type, sm, fd, space, condition))) # ## Load data. npz = np.load(op.join(out_dir, 'first_levels.npz')) ces = npz['ces'] cesvar = np.abs( 1. / npz['cesvar'] ) # ## Define indices. connectivity = load_sparse_coo(op.join(root_dir, 'fmri_second_levels', '%s_%s_connectivity.npz' % (version, space))) index, = np.where(~np.isinf(cesvar).sum(axis=1).astype(bool)) include = ~np.isinf(cesvar).sum(axis=1).astype(bool) # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Setup for permutation testing. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # sign_flips = np.ones((1,n_subj)) n_shuffles = sign_flips.shape[0] # ## Preallocate arrays for results. shape = [n_shuffles] + list(ces.shape[:-1]) Bmap = np.zeros(shape) Fmap = np.zeros(shape) # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Main loop. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ## Loop it! for n, sf in enumerate(sign_flips): # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Compute statistics. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # for m in index: # ## Update variables. W = np.diag(cesvar[m]) Y = ces[m] # ## Permute values. ## See Winkler et al. (2014), pg. 
385 ## To compute Hat Matrix, see: https://en.wikipedia.org/wiki/Projection_matrix and Z = X[:,1:] ZZ = Z.dot( np.linalg.inv( Z.T.dot(W).dot(Z) ) ).dot(Z.T).dot(W) Rz = np.identity(n_subj) - ZZ Y = np.diag(sf).dot(Rz).dot(Y) # ## Perform WLS. Bmap[n,m], Fmap[n,m] = wls(X,Y,W) # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ### Save results. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ## Translate array back into proper space. image = prepare_image(Fmap.squeeze(), space).squeeze() # ## Load in results table. resultsf = op.join(label_dir, '%s' + ('_mni2.%s.%s.%s.%s.%s.%s.%s.%s.%s.csv' % (version, task, model_name, analysis, epochs_type, sm, fd, hemi, condition))) if space == 'mni305': results = read_csv(resultsf % 'volume') fscores = [image[i,j,k] for i,j,k in results[['cX','cY','cZ']].values] results['Fpre'] = fscores results.to_csv(resultsf % 'fstat_volume', index=False) else: results = read_csv(resultsf % 'surface') if not 'Fpre' in results.columns: results['Fpre'] = np.nan vertices = results.loc[[True if label.endswith(space) else False for label in results.Label],'V'].values for v in vertices: results.loc[results.V==v,'Fpre'] = image[v] results.to_csv(resultsf % 'fstat_surface', index=False) print('Done.')
04_fMRI-SecondLevels.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import torch import numpy as np import matplotlib.pyplot as plt import norse from norse.torch import li_step, LICell, LIState, LIParameters, LIF from norse.torch import lif_step, LIFCell, LIFState, LIFParameters from norse.torch.module import leaky_integrator as li from norse.torch.functional import lif as lif import ProjectMethods as pm from scipy.signal import convolve2d from torch.nn import Conv2d as conv2 N=100 U = 2 T=200 #Creating a cell with a leak of 1 and a current time ( synaptic) time constant p = li.LIParameters(v_leak = torch.tensor(0), tau_syn_inv=torch.as_tensor(1/5e-3), tau_mem_inv= torch.as_tensor(0.7 / 1e-2)) #Creating LIF parameters, TBDefined2 p2 = LIFParameters(tau_syn_inv = torch.as_tensor(1.0 / 5e-3), tau_mem_inv = torch.as_tensor(0.7 / 1e-2), v_leak = torch.as_tensor(0), v_th = torch.as_tensor(0.1)) #Initial state of cell LI cells stateCell = li.LIState(v = torch.zeros(N), i = torch.zeros(N)) #Initial state of cell LIF cells #stateCell2= LIFState(z = torch.zeros(N), v = torch.zeros(N), i = torch.zeros(N)) stateCell2= lif.LIFFeedForwardState(v = torch.zeros(N), i = torch.zeros(N)) weights = torch.ones(N) voltages = torch.zeros(N,U,T) trueVals = torch.zeros(N,U) voltages2 = torch.zeros(N,U,T) trueVals2 = torch.zeros(N,U) decode = pm.decode(p) decode.print_max_min() data = pm.create_sparse_data(100, 100, 100) matrix = pm.sparse_data_to_sparse_matrix(data, [100, 100, 100]) matrix2 = torch.zeros(100,100) kernel = torch.ones([10,10]) for nr, array in enumerate(matrix, start=0): convolved = convolve2d(array, kernel, mode="valid") matrix2[nr] = torch.from_numpy(convolved[::10, ::10]).flatten() # - cell = LIF() data = torch.ones([5,2,2]) output, state = cell(data) print(state) cell2 = LIF() state2 = None #data = 
torch.ones([5,2,2]) #output, state = cell(data) #print(state) output, state2 = cell2(matrix2) print(output.size()) print(state) # + p2 = LIFParameters(tau_syn_inv = torch.as_tensor(1.0 / 5e-3), tau_mem_inv = torch.as_tensor(0.7 / 1e-2), v_leak = torch.as_tensor(0), v_th = torch.as_tensor(1)) cell3 = LIFCell(p=p2) print(cell3) state3 = None # + volt3 = torch.zeros(N,100) spikes = torch.zeros(N) for t, array in enumerate(matrix2,start=0): v, stateCell2 = lif.lif_feed_forward_step(input_tensor=array, state=stateCell2, p=p2, dt=0.001) output, state3 = cell3(input_tensor=array, state=state3) spikes = spikes + output #v, stateCell2 = lif_step(input_tensor=array, state=stateCell2,input_weights=weights, recurrent_weights=weights , p=p2, dt=0.001) for i in range(100): #volt3[i][t] = stateCell2.v[i] volt3[i][t] = state3.v[i] #volt3 #voltages[n][y][x] = v[n] print(spikes) print(torch.topk(spikes, 2).indices) avg = torch.tensor([0,0]) for nr in torch.topk(spikes, 2).indices: #print(nr) avg = avg + pm.neuron_nr_to_coord(nr) print(pm.neuron_nr_to_coord(nr)) print(avg/torch.topk(spikes, 2).indices.size(0)) pm.plotNeurons(volt3.detach(),N) # - # + for x in range(len(data)): # + #while True: for y in range(U): trueAngle = torch.randint(1800,2500,(2,)) angleval = torch.ones(2)-((trueAngle-1800)/700) for x in range(T): input = torch.zeros(N) for n in range(N): input[n] = to_input(torch.rand(1)-angleval[n]) #input = torch.tensor([to_input(torch.rand(1)-angleval[0]),to_input(torch.rand(1)-angleval[1])]) v, stateCell = li.li_feed_forward_step(input, state=stateCell, p=p, dt = 0.001) for n in range(N): voltages[n][y][x] = v[n] if x % 50 == 0: print("True angle: {}".format(trueAngle)) print("Approximated angle: {}".format(decode.decode_to_angle(v,adValue))) # if x == 198: # adValue = pm.train_val_to_angle_adjust(v, trueAngle, adValue) # print(adValue) #pm.plotNeurons(voltages, N, U) # -
testImageNeurons.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # How to generate data files for tabulated rates
# These tabulated reaction rates are included along with library files
# describing how the tables should be read.
#
# Tabulated reactions of the form `A -> B` are supported, where the
# rates are tabulated in a two-dimensional space of:
#
# - the product of density and electron fraction
# - temperature
#
# This directory includes tabulated rates obtained from Suzuki et al.,
# 2016, ApJ 817:163, downloaded from
# http://w3p.phys.chs.nihon-u.ac.jp/~suzuki/data2/link.html
# The Suzuki data tables are in the following form:
# ```
# !23Ne -> 23F, e-capture with screening effects
# ...
# !Log(rhoY)  Log(T)    mu        dQ       Vs     e-cap-rate  nu-energy-loss gamma-energy
# !                    (MeV)     (MeV)    (MeV)    (1/s)       (MeV/s)       (MeV/s)
#   7.00      7.00    1.2282    0.0279   0.0077  -0.50000E+03 -0.50000E+03  -0.50000E+03
# ```
# Header lines are marked with a leading `!`; everything else is data.

import numpy as np

# MeV -> erg conversion factor used for all energy columns.
MEV_TO_ERG = 1.60218e-6

# Input/output locations (adjust for your machine).
SUZUKI_PATH = "/Users/sailor/Desktop/A23_Ne_F.dat"
OUT_TABLE_PATH = "/Users/sailor/Desktop/23Ne-23F_electroncapture.dat"
OUT_RATE_PATH = "/Users/sailor/Desktop/ne23--f23-toki"


def split_header(lines):
    """Separate Suzuki-format header lines from data lines.

    Header lines start with '!'.  BUG FIX: the original sliced
    ``data[inde[0]:len(inde)]``, which is only correct when the header
    happens to be a contiguous run starting at line 0; selecting by the
    '!' marker is robust regardless of where the header sits.

    Returns (header_lines, data_lines); blank lines are dropped from the
    data portion.
    """
    header = [ln for ln in lines if ln.startswith('!')]
    data = [ln for ln in lines if not ln.startswith('!') and ln.strip()]
    return header, data


def pynucastro_header(header):
    """Return a copy of *header* whose last two lines carry the column
    names and units that pynucastro expects for a tabular rate file."""
    header = list(header)
    header[-2] = '!rhoY           T               mu              dQ              Vs              e-cap-rate      nu-energy-loss  gamma-energy\n'
    header[-1] = '!g/cm^3         K               erg             erg             erg             1/s             erg/s           erg/s\n'
    return header


def parse_rows(data):
    """Convert whitespace-separated data lines into lists of floats.

    Replaces the original re.split-on-spaces + remove-'' + remove-[]
    dance with a single str.split(), which already collapses runs of
    whitespace and never yields empty fields.
    """
    rows = []
    for line in data:
        fields = line.split()
        if fields:
            rows.append([float(f) for f in fields])
    return rows


def convert_row(row):
    """Convert one parsed 8-column row from Suzuki units to CGS.

    - cols 0-1: log10(rhoY / (g/cm^3)), log10(T / K)  -> linear values
    - cols 2-4: mu, dQ, Vs in MeV                      -> erg
    - col  5:   log10(rate / s^-1)                     -> 1/s
    - cols 6-7: log10(loss rate / (MeV/s))             -> erg/s

    Returns the values formatted as '%e' strings, ready to be written.
    """
    return [
        "%e" % np.power(10.0, row[0]),
        "%e" % np.power(10.0, row[1]),
        "%e" % (row[2] * MEV_TO_ERG),
        "%e" % (row[3] * MEV_TO_ERG),
        "%e" % (row[4] * MEV_TO_ERG),
        "%e" % np.power(10.0, row[5]),
        "%e" % (np.power(10.0, row[6]) * MEV_TO_ERG),
        "%e" % (np.power(10.0, row[7]) * MEV_TO_ERG),
    ]


def main():
    """Read the Suzuki table, convert it, and write the pynucastro data
    file plus the small rate-description file.

    In the end, copy the two generated files into
    ``pynucastro/library/tabular/`` so pynucastro can use them.
    """
    with open(SUZUKI_PATH) as f:
        lines = f.readlines()

    header, data = split_header(lines)
    header = pynucastro_header(header)
    rows = [convert_row(r) for r in parse_rows(data)]

    # The converted table, one space-separated row per line.
    with open(OUT_TABLE_PATH, "w") as f:
        f.writelines(header)
        for r in rows:
            f.write(" ".join(r) + "\n")

    # The rate file tells pynucastro how to read the table
    # (see http://pynucastro.github.io/pynucastro/networks.html#tabular-rates).
    # 152 = number of table rows, 39 = header length, as in the original.
    with open(OUT_RATE_PATH, "w") as f:
        f.write("t\n " + "ne23" + " " + "f23" + "\n"
                + "23Ne-23F_electroncapture.dat" + "\n152\n39")


if __name__ == "__main__":
    main()
pynucastro/library/tabular/generate_tabulated_file.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
from selectorlib import Extractor
import requests
from time import sleep
import csv

# FIX: the Extractor instance `e` used inside scrape() was never created in
# the original cell, so every call raised NameError.  Build it here from the
# selectorlib YAML template describing the booking.com results page.
# TODO(review): confirm the template filename shipped with this scraper.
e = Extractor.from_yaml_file('booking.yml')


def scrape(url):
    """Download one booking.com search-results page and extract hotel records.

    Returns the dict produced by the selectorlib Extractor (None-ish when
    nothing matched).  Sends browser-like headers to reduce blocking.
    """
    headers = {
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        # if blocked, rotate the User-Agent below
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Referer': 'https://www.booking.com/index.en-gb.html',
        'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
    }
    # Download the page using requests
    print("Downloading %s" % url)
    r = requests.get(url, headers=headers)
    # Pass the HTML of the page to the extractor
    return e.extract(r.text, base_url=url)


# Scrape every URL listed in urls.txt and append each hotel row to data.csv.
with open("urls.txt", 'r') as urllist, open('data.csv', 'w') as outfile:
    fieldnames = [
        "name",
        "location",
        "price",
        "price_for",
        "room_type",
        "beds",
        "rating",
        "rating_title",
        "number_of_ratings",
        "url",
        "cm"
    ]
    writer = csv.DictWriter(outfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
    writer.writeheader()
    for url in urllist.readlines():
        data = scrape(url)
        if data:
            for h in data['hotels']:
                # FIX: original had the typo `writer7writerow(h)` (NameError).
                writer.writerow(h)
        sleep(4)  # be polite: pause between requests
# -

import pandas as pd

# +
# Merge the eleven per-run CSV exports into one frame.
df = pd.concat(
    map(pd.read_csv, ['data-Copy1.csv', 'data-Copy2.csv', 'data-Copy3.csv',
                      'data-Copy4.csv', 'data-Copy5.csv', 'data-Copy6.csv',
                      'data-Copy7.csv', 'data-Copy8.csv', 'data-Copy9.csv',
                      'data-Copy10.csv', 'data-Copy11.csv']),
    ignore_index=True)
print(df)

# +
csv_file_list = ['data-Copy1.csv', 'data-Copy2.csv']
# -

import os
import glob
import pandas as pd

# +
# Same merge done two equivalent ways (kept from the original exploration).
list_of_dataframes = [pd.read_csv(filename) for filename in csv_file_list]
merged_df = pd.concat(list_of_dataframes)
print([merged_df])

# combine all files in the list
combined_csv = pd.concat([pd.read_csv(f) for f in csv_file_list])
# export to csv
combined_csv.to_csv("combined_csv.csv", index=False, encoding='utf-8-sig')

# +
data1 = pd.read_csv('data-Copy1.csv')
data2 = pd.read_csv('data-Copy2.csv')

# using merge function by setting how='inner'
output1 = pd.merge(data1, data2)
# displaying result
print(output1)

# +
# Concatenate every CSV in the working directory, sorted by name.
interesting_files = glob.glob("*.csv")
df_list = [pd.read_csv(filename) for filename in sorted(interesting_files)]
full_df = pd.concat(df_list)
full_df.to_csv('output.csv')
# -
scrap/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
sys.path.append("..")  # Adds higher directory to python modules path.

# A "tensor" here is just a (possibly nested) Python list.
Tensor = list

from typing import List


def shape(tensor: Tensor) -> List[int]:
    """Return the size of each axis of the tensor, outermost first."""
    if isinstance(tensor, list):
        # One axis here, plus whatever axes the first row has.
        return [len(tensor)] + shape(tensor[0])
    return []  # a scalar has no axes


assert shape([1, 2, 3]) == [3]
assert shape([[1, 2], [3, 4], [5, 6]]) == [3, 2]


def is_1d(tensor: Tensor) -> bool:
    """
    If tensor[0] is a list, it's a higher-order tensor.
    Otherwise, tensor is 1-dimensional (that is, a vector).
    """
    first_element = tensor[0]
    return not isinstance(first_element, list)


assert is_1d([1, 2, 3])
assert not is_1d([[1, 2], [3, 4]])


def tensor_sum(tensor: Tensor) -> float:
    """Sums up all the values in the tensor."""
    if is_1d(tensor):
        return sum(tensor)                   # flat vector: builtin sum suffices
    return sum(map(tensor_sum, tensor))      # otherwise sum each row recursively
assert tensor_sum([1, 2, 3]) == 6 assert tensor_sum([[1, 2], [3, 4]]) == 10 from typing import Callable def tensor_apply(f: Callable[[float], float], tensor: Tensor) -> Tensor: """Applies f elementwise""" if is_1d(tensor): return [f(x) for x in tensor] else: return [tensor_apply(f, tensor_i) for tensor_i in tensor] assert tensor_apply(lambda x: x + 1, [1, 2, 3]) == [2, 3, 4] assert tensor_apply(lambda x: 2 * x, [[1, 2], [3, 4]]) == [[2, 4], [6, 8]] def zeros_like(tensor: Tensor) -> Tensor: return tensor_apply(lambda _: 0.0, tensor) assert zeros_like([1, 2, 3]) == [0, 0, 0] assert zeros_like([[1, 2], [3, 4]]) == [[0, 0], [0, 0]] def tensor_combine(f: Callable[[float, float], float], t1: Tensor, t2: Tensor) -> Tensor: """Applies f to corresponding elements of t1 and t2""" if is_1d(t1): return [f(x, y) for x, y in zip(t1, t2)] else: return [tensor_combine(f, t1_i, t2_i) for t1_i, t2_i in zip(t1, t2)] import operator assert tensor_combine(operator.add, [1, 2, 3], [4, 5, 6]) == [5, 7, 9] assert tensor_combine(operator.mul, [1, 2, 3], [4, 5, 6]) == [4, 10, 18] from typing import Iterable, Tuple class Layer: """ Our neural networks will be composed of Layers, each of which knows how to do some computation on its inputs in the "forward" direction and propagate gradients in the "backward" direction. """ def forward(self, input): """ Note the lack of types. We're not going to be prescriptive about what kinds of inputs layers can take and what kinds of outputs they can return. """ raise NotImplementedError def backward(self, gradient): """ Similarly, we're not going to be prescriptive about what the gradient looks like. It's up to you the user to make sure that you're doing things sensibly. """ raise NotImplementedError def params(self) -> Iterable[Tensor]: """ Returns the parameters of this layer. The default implementation returns nothing, so that if you have a layer with no parameters you don't have to implement this. 
""" return () def grads(self) -> Iterable[Tensor]: """ Returns the gradients, in the same order as params() """ return () from scratch.neural_networks import sigmoid class Sigmoid(Layer): def forward(self, input: Tensor) -> Tensor: """ Apply sigmoid to each element of the input tensor, and save the results to use in backpropagation. """ self.sigmoids = tensor_apply(sigmoid, input) return self.sigmoids def backward(self, gradient: Tensor) -> Tensor: return tensor_combine(lambda sig, grad: sig * (1 - sig) * grad, self.sigmoids, gradient) # + import random from scratch.probability import inverse_normal_cdf # - def random_uniform(*dims: int) -> Tensor: if len(dims) == 1: return [random.random() for _ in range(dims[0])] else: return [random_uniform(*dims[1:]) for _ in range(dims[0])] def random_normal(*dims: int, mean: float = 0.0, variance: float = 1.0) -> Tensor: if len(dims) == 1: return [mean + variance * inverse_normal_cdf(random.random()) for _ in range(dims[0])] else: return [random_normal(*dims[1:], mean=mean, variance=variance) for _ in range(dims[0])] assert shape(random_uniform(2, 3, 4)) == [2, 3, 4] assert shape(random_normal(5, 6, mean=10)) == [5, 6] def random_tensor(*dims: int, init: str = 'normal') -> Tensor: if init == 'normal': return random_normal(*dims) elif init == 'uniform': return random_uniform(*dims) elif init == 'xavier': variance = len(dims) / sum(dims) return random_normal(*dims, variance=variance) else: raise ValueError(f"unknown init: {init}") from scratch.linear_algebra import dot class Linear(Layer): def __init__(self, input_dim: int, output_dim: int, init: str = 'xavier') -> None: """ A layer of output_dim neurons, each with input_dim weights (and a bias). 
""" self.input_dim = input_dim self.output_dim = output_dim # self.w[o] is the weights for the o-th neuron self.w = random_tensor(output_dim, input_dim, init=init) # self.b[o] is the bias term for the o-th neuron self.b = random_tensor(output_dim, init=init) def forward(self, input: Tensor) -> Tensor: # Save the input to use in the backward pass. self.input = input # Return the vector of neuron outputs. return [dot(input, self.w[o]) + self.b[o] for o in range(self.output_dim)] def backward(self, gradient: Tensor) -> Tensor: # Each b[o] gets added to output[o], which means # the gradient of b is the same as the output gradient. self.b_grad = gradient # Each w[o][i] multiplies input[i] and gets added to output[o]. # So its gradient is input[i] * gradient[o]. self.w_grad = [[self.input[i] * gradient[o] for i in range(self.input_dim)] for o in range(self.output_dim)] # Each input[i] multiplies every w[o][i] and gets added to every # output[o]. So its gradient is the sum of w[o][i] * gradient[o] # across all the outputs. return [sum(self.w[o][i] * gradient[o] for o in range(self.output_dim)) for i in range(self.input_dim)] def params(self) -> Iterable[Tensor]: return [self.w, self.b] def grads(self) -> Iterable[Tensor]: return [self.w_grad, self.b_grad] from typing import List class Sequential(Layer): """ A layer consisting of a sequence of other layers. It's up to you to make sure that the output of each layer makes sense as the input to the next layer. 
""" def __init__(self, layers: List[Layer]) -> None: self.layers = layers def forward(self, input): """Just forward the input through the layers in order.""" for layer in self.layers: input = layer.forward(input) return input def backward(self, gradient): """Just backpropagate the gradient through the layers in reverse.""" for layer in reversed(self.layers): gradient = layer.backward(gradient) return gradient def params(self) -> Iterable[Tensor]: """Just return the params from each layer.""" return (param for layer in self.layers for param in layer.params()) def grads(self) -> Iterable[Tensor]: """Just return the grads from each layer.""" return (grad for layer in self.layers for grad in layer.grads()) class Loss: def loss(self, predicted: Tensor, actual: Tensor) -> float: """How good are our predictions? (Larger numbers are worse.)""" raise NotImplementedError def gradient(self, predicted: Tensor, actual: Tensor) -> Tensor: """How does the loss change as the predictions change?""" raise NotImplementedError class SSE(Loss): """Loss function that computes the sum of the squared errors.""" def loss(self, predicted: Tensor, actual: Tensor) -> float: # Compute the tensor of squared differences squared_errors = tensor_combine( lambda predicted, actual: (predicted - actual) ** 2, predicted, actual) # And just add them up return tensor_sum(squared_errors) def gradient(self, predicted: Tensor, actual: Tensor) -> Tensor: return tensor_combine( lambda predicted, actual: 2 * (predicted - actual), predicted, actual) sse_loss = SSE() assert sse_loss.loss([1, 2, 3], [10, 20, 30]) == 9 ** 2 + 18 ** 2 + 27 ** 2 assert sse_loss.gradient([1, 2, 3], [10, 20, 30]) == [-18, -36, -54] class Optimizer: """ An optimizer updates the weights of a layer (in place) using information known by either the layer or the optimizer (or by both). 
""" def step(self, layer: Layer) -> None: raise NotImplementedError class GradientDescent(Optimizer): def __init__(self, learning_rate: float = 0.1) -> None: self.lr = learning_rate def step(self, layer: Layer) -> None: for param, grad in zip(layer.params(), layer.grads()): # Update param using a gradient step param[:] = tensor_combine( lambda param, grad: param - grad * self.lr, param, grad) # + tensor = [[1, 2], [3, 4]] for row in tensor: row = [0, 0] assert tensor == [[1, 2], [3, 4]], "assignment doesn't update a list" for row in tensor: row[:] = [0, 0] assert tensor == [[0, 0], [0, 0]], "but slice assignment does" # - class Momentum(Optimizer): def __init__(self, learning_rate: float, momentum: float = 0.9) -> None: self.lr = learning_rate self.mo = momentum self.updates: List[Tensor] = [] # running average def step(self, layer: Layer) -> None: # If we have no previous updates, start with all zeros. if not self.updates: self.updates = [zeros_like(grad) for grad in layer.grads()] for update, param, grad in zip(self.updates, layer.params(), layer.grads()): # Apply momentum update[:] = tensor_combine( lambda u, g: self.mo * u + (1 - self.mo) * g, update, grad) # Then take a gradient step param[:] = tensor_combine( lambda p, u: p - self.lr * u, param, update) import math def tanh(x: float) -> float: # If x is very large or very small, tanh is (essentially) 1 or -1. # We check for this because e.g. math.exp(1000) raises an error. if x < -100: return -1 elif x > 100: return 1 em2x = math.exp(-2 * x) return (1 - em2x) / (1 + em2x) class Tanh(Layer): def forward(self, input: Tensor) -> Tensor: # Save tanh output to use in backward pass. 
self.tanh = tensor_apply(tanh, input) return self.tanh def backward(self, gradient: Tensor) -> Tensor: return tensor_combine( lambda tanh, grad: (1 - tanh ** 2) * grad, self.tanh, gradient) class Relu(Layer): def forward(self, input: Tensor) -> Tensor: self.input = input return tensor_apply(lambda x: max(x, 0), input) def backward(self, gradient: Tensor) -> Tensor: return tensor_combine(lambda x, grad: grad if x > 0 else 0, self.input, gradient) def softmax(tensor: Tensor) -> Tensor: """Softmax along the last dimension""" if is_1d(tensor): # Subtract largest value for numerical stabilitity. largest = max(tensor) exps = [math.exp(x - largest) for x in tensor] sum_of_exps = sum(exps) # This is the total "weight". return [exp_i / sum_of_exps # Probability is the fraction for exp_i in exps] # of the total weight. else: return [softmax(tensor_i) for tensor_i in tensor] class SoftmaxCrossEntropy(Loss): """ This is the negative-log-likelihood of the observed values, given the neural net model. So if we choose weights to minimize it, our model will be maximizing the likelihood of the observed data. """ def loss(self, predicted: Tensor, actual: Tensor) -> float: # Apply softmax to get probabilities probabilities = softmax(predicted) # This will be log p_i for the actual class i and 0 for the other # classes. We add a tiny amount to p to avoid taking log(0). likelihoods = tensor_combine(lambda p, act: math.log(p + 1e-30) * act, probabilities, actual) # And then we just sum up the negatives. return -tensor_sum(likelihoods) def gradient(self, predicted: Tensor, actual: Tensor) -> Tensor: probabilities = softmax(predicted) # Isn't this a pleasant equation? return tensor_combine(lambda p, actual: p - actual, probabilities, actual) # + class Dropout(Layer): def __init__(self, p: float) -> None: self.p = p self.train = True def forward(self, input: Tensor) -> Tensor: if self.train: # Create a mask of 0s and 1s shaped like the input # using the specified probability. 
self.mask = tensor_apply( lambda _: 0 if random.random() < self.p else 1, input) # Multiply by the mask to dropout inputs. return tensor_combine(operator.mul, input, self.mask) else: # During evaluation just scale down the outputs uniformly. return tensor_apply(lambda x: x * (1 - self.p), input) def backward(self, gradient: Tensor) -> Tensor: if self.train: # Only propagate the gradients where mask == 1 return tensor_combine(operator.mul, gradient, self.mask) else: raise RuntimeError("don't call backward when not in train mode") #plt.savefig('im/mnist.png') #plt.gca().clear() # - def one_hot_encode(i: int, num_labels: int = 10) -> List[float]: return [1.0 if j == i else 0.0 for j in range(num_labels)] assert one_hot_encode(3) == [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] assert one_hot_encode(2, num_labels=5) == [0, 0, 1, 0, 0] # + from scratch.linear_algebra import squared_distance import json # - def save_weights(model: Layer, filename: str) -> None: weights = list(model.params()) with open(filename, 'w') as f: json.dump(weights, f) def load_weights(model: Layer, filename: str) -> None: with open(filename) as f: weights = json.load(f) # Check for consistency assert all(shape(param) == shape(weight) for param, weight in zip(model.params(), weights)) # Then load using slice assignment: for param, weight in zip(model.params(), weights): param[:] = weight # !pip install mnist # !pip install tqdm # + def main(): # XOR revisited # training data xs = [[0., 0], [0., 1], [1., 0], [1., 1]] ys = [[0.], [1.], [1.], [0.]] random.seed(0) net = Sequential([ Linear(input_dim=2, output_dim=2), Sigmoid(), Linear(input_dim=2, output_dim=1) ]) import tqdm optimizer = GradientDescent(learning_rate=0.1) loss = SSE() with tqdm.trange(3000) as t: for epoch in t: epoch_loss = 0.0 for x, y in zip(xs, ys): predicted = net.forward(x) epoch_loss += loss.loss(predicted, y) gradient = loss.gradient(predicted, y) net.backward(gradient) optimizer.step(net) t.set_description(f"xor loss {epoch_loss:.3f}") for 
param in net.params(): print(param) # FizzBuzz Revisited from scratch.neural_networks import binary_encode, fizz_buzz_encode, argmax xs = [binary_encode(n) for n in range(101, 1024)] ys = [fizz_buzz_encode(n) for n in range(101, 1024)] NUM_HIDDEN = 25 random.seed(0) net = Sequential([ Linear(input_dim=10, output_dim=NUM_HIDDEN, init='uniform'), Tanh(), Linear(input_dim=NUM_HIDDEN, output_dim=4, init='uniform'), Sigmoid() ]) def fizzbuzz_accuracy(low: int, hi: int, net: Layer) -> float: num_correct = 0 for n in range(low, hi): x = binary_encode(n) predicted = argmax(net.forward(x)) actual = argmax(fizz_buzz_encode(n)) if predicted == actual: num_correct += 1 return num_correct / (hi - low) optimizer = Momentum(learning_rate=0.1, momentum=0.9) loss = SSE() with tqdm.trange(1000) as t: for epoch in t: epoch_loss = 0.0 for x, y in zip(xs, ys): predicted = net.forward(x) epoch_loss += loss.loss(predicted, y) gradient = loss.gradient(predicted, y) net.backward(gradient) optimizer.step(net) accuracy = fizzbuzz_accuracy(101, 1024, net) t.set_description(f"fb loss: {epoch_loss:.2f} acc: {accuracy:.2f}") # Now check results on the test set print("test results", fizzbuzz_accuracy(1, 101, net)) random.seed(0) net = Sequential([ Linear(input_dim=10, output_dim=NUM_HIDDEN, init='uniform'), Tanh(), Linear(input_dim=NUM_HIDDEN, output_dim=4, init='uniform') # No final sigmoid layer now ]) optimizer = Momentum(learning_rate=0.1, momentum=0.9) loss = SoftmaxCrossEntropy() with tqdm.trange(100) as t: for epoch in t: epoch_loss = 0.0 for x, y in zip(xs, ys): predicted = net.forward(x) epoch_loss += loss.loss(predicted, y) gradient = loss.gradient(predicted, y) net.backward(gradient) optimizer.step(net) accuracy = fizzbuzz_accuracy(101, 1024, net) t.set_description(f"fb loss: {epoch_loss:.3f} acc: {accuracy:.2f}") # Again check results on the test set print("test results", fizzbuzz_accuracy(1, 101, net)) # Load the MNIST data import mnist # This will download the data, change this to 
where you want it. # (Yes, it's a 0-argument function, that's what the library expects.) # (Yes, I'm assigning a lambda to a variable, like I said never to do.) mnist.temporary_dir = lambda: '/tmp' # Each of these functions first downloads the data and returns a numpy array. # We call .tolist() because our "tensors" are just lists. train_images = mnist.train_images().tolist() train_labels = mnist.train_labels().tolist() assert shape(train_images) == [60000, 28, 28] assert shape(train_labels) == [60000] import matplotlib.pyplot as plt fig, ax = plt.subplots(10, 10) for i in range(10): for j in range(10): # Plot each image in black and white and hide the axes. ax[i][j].imshow(train_images[10 * i + j], cmap='Greys') ax[i][j].xaxis.set_visible(False) ax[i][j].yaxis.set_visible(False) # plt.show() # Load the MNIST test data test_images = mnist.test_images().tolist() test_labels = mnist.test_labels().tolist() assert shape(test_images) == [10000, 28, 28] assert shape(test_labels) == [10000] # Recenter the images # Compute the average pixel value avg = tensor_sum(train_images) / 60000 / 28 / 28 # Recenter, rescale, and flatten train_images = [[(pixel - avg) / 256 for row in image for pixel in row] for image in train_images] test_images = [[(pixel - avg) / 256 for row in image for pixel in row] for image in test_images] assert shape(train_images) == [60000, 784], "images should be flattened" assert shape(test_images) == [10000, 784], "images should be flattened" # After centering, average pixel should be very close to 0 assert -0.0001 < tensor_sum(train_images) < 0.0001 # One-hot encode the test data train_labels = [one_hot_encode(label) for label in train_labels] test_labels = [one_hot_encode(label) for label in test_labels] assert shape(train_labels) == [60000, 10] assert shape(test_labels) == [10000, 10] # Training loop import tqdm def loop(model: Layer, images: List[Tensor], labels: List[Tensor], loss: Loss, optimizer: Optimizer = None) -> None: correct = 0 # Track 
number of correct predictions. total_loss = 0.0 # Track total loss. with tqdm.trange(len(images)) as t: for i in t: predicted = model.forward(images[i]) # Predict. if argmax(predicted) == argmax(labels[i]): # Check for correct += 1 # correctness. total_loss += loss.loss(predicted, labels[i]) # Compute loss. # If we're training, backpropagate gradient and update weights. if optimizer is not None: gradient = loss.gradient(predicted, labels[i]) model.backward(gradient) optimizer.step(model) # And update our metrics in the progress bar. avg_loss = total_loss / (i + 1) acc = correct / (i + 1) t.set_description(f"mnist loss: {avg_loss:.3f} acc: {acc:.3f}") # The logistic regression model for MNIST random.seed(0) # Logistic regression is just a linear layer followed by softmax model = Linear(784, 10) loss = SoftmaxCrossEntropy() # This optimizer seems to work optimizer = Momentum(learning_rate=0.01, momentum=0.99) # Train on the training data loop(model, train_images, train_labels, loss, optimizer) # Test on the test data (no optimizer means just evaluate) loop(model, test_images, test_labels, loss) # A deep neural network for MNIST random.seed(0) # Name them so we can turn train on and off dropout1 = Dropout(0.1) dropout2 = Dropout(0.1) model = Sequential([ Linear(784, 30), # Hidden layer 1: size 30 dropout1, Tanh(), Linear(30, 10), # Hidden layer 2: size 10 dropout2, Tanh(), Linear(10, 10) # Output layer: size 10 ]) # Training the deep model for MNIST optimizer = Momentum(learning_rate=0.01, momentum=0.99) loss = SoftmaxCrossEntropy() # Enable dropout and train (takes > 20 minutes on my laptop!) dropout1.train = dropout2.train = True loop(model, train_images, train_labels, loss, optimizer) # Disable dropout and evaluate dropout1.train = dropout2.train = False loop(model, test_images, test_labels, loss) if __name__ == "__main__": main() # -
notebooks/19_deep_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python для анализа данных # # ## Бот в Telegram # # ### Сбор статистики по Коронавирусу # # *<NAME>* # ### Есть сайт с информацией о распространении вируса в мире # Я предлагаю достать оттуда информацию в виде Pandas dataframe а затем по запросу пользователя в телеграм выгружать ее # Импортируем requests и запишем адрес сайта со статистикой по COVID в переменную import requests from bs4 import BeautifulSoup url1 = 'https://www.worldometers.info/coronavirus/' # После чего через get-запрос достанем текст страницы со статистикой и распарсим ее с помощью BeautifulSoup url = 'https://www.worldometers.info/coronavirus/' website = requests.get(url).text soup = BeautifulSoup(website, 'lxml') # soup len(soup.find_all('table')) soup.find_all('table')[0] # На странице три таблицы: одна отвечает за информацию на сегодняшний день (под номером 0), а вторая - за информацию на вчерашний день. Достанем обе таблицы (они лежат между тегами \<\table\>\) table = soup.find_all('table')[0] # table # Разобьем таблицу на строки (они лежат между тегами tr) rows = table.find_all('tr') # rows # Сначала достанем заголовки (между тегами th). Для примера достанем заголовок первой колонки. rows[0].find_all('th')[1].get_text().strip() # Теперь мы можем достать содержимое ячеек таблицы (между тегами td). 
Для примера достанем первую запись первого по счету столбца rows[1].find_all('td')[1].get_text().strip() # Протестируем сборку на примере одной колонки, а потом пробежимся по их списку в цикле # + col1 = [] col1.append(rows[0].find_all('th')[1].get_text().strip()) # отдельно добавляем заголовок for row in rows[1:]: # начинаем со второго ряда таблицы, потому что 0 уже обработали выше r = row.find_all('td') # находим все теги td для строки таблицы col1.append(r[1].get_text().strip()) # сохраняем данные в наш список print(col1) # - col1[:233][-1] # Чтобы еще раз проверить, запустим для другой колонки - TotalCases # + col2 = [] col2.append(rows[0].find_all('th')[2].get_text().strip()) # отдельно добавляем заголовок for row in rows[1:]: # начинаем со второго ряда таблицы, потому что 0 уже обработали выше r = row.find_all('td') # находим все теги td для строки таблицы col2.append(r[2].get_text().strip()) # сохраняем данные в наш список print(col2) # - # А теперь пройдемся циклом по 9 колонкам, сгружая данные в список списков field_list = [] for i in range(9): col = [] col.append(rows[0].find_all('th')[i+1].get_text().strip()) # отдельно добавляем заголовок for row in rows[1:]: # начинаем со второго ряда таблицы, потому что 0 уже обработали выше r = row.find_all('td') # находим все теги td для строки таблицы col.append(r[i+1].get_text().strip()) # сохраняем данные в наш список field_list.append(col) # А вот заголовок второго столбца field_list[2][0] # Остается превратить эту структуру в DataFrame. Для этого сначала превратим ее в словарь из списков, где ключами будут названия колонок. d = dict() for i in range(9): d[field_list[i][0]] = field_list[i][1:] #d # А теперь сгружаем этот словарь в функцию pd.DataFrame и он превращается в искомый DataFrame import pandas as pd df = pd.DataFrame(d) df.head(10) df.head(8) # Для удобства переименуем колонки с составными названиями. 
df = df.rename(columns={'Country,Other': 'Country', 'Serious,Critical': 'SeriousCritical'}) df.head(8) # А теперь сгрузим весь процесс, который мы проделали, в одну функцию. Ее аргумент tag = 0 - информация за сегодняшний день, a tag=1 - информация за вчерашний день. def stat(tag = 0): url = 'https://www.worldometers.info/coronavirus/' website = requests.get(url).text soup = BeautifulSoup(website, 'lxml') table = soup.find_all('table')[tag] rows = table.find_all('tr') d = dict() for i in range(9): col = [] key = rows[0].find_all('th')[i+1].get_text().strip() for row in rows[1:233]: r = row.find_all('td') col.append(r[i+1].get_text().strip()) d[key] = col df = pd.DataFrame(d) df = df.rename(columns = {'Country,Other':'Country', 'Serious,Critical':'SeriousCritical'}) return df stat().head() list(stat()['Country'][:8]) # В первых 8 строках таблицы в колонке Country лежат регионы + Мир. Одна строка пустая. Уберем ее с помощью спискового включения. print(*list(i+'\n' for i in list(stat()['Country'][:8]) if i!='')) # А с 8 по 232 строки лежат названия стран. 
Выведем ее с помощью спискового включения по 6 стран в линию (для этого вставим символ переноса строки) x = stat()['Country'][8:233] ', '.join([e+'\n' if i%6 == 5 else e for i,e in enumerate(x)]) x = stat()['Country'][8:233] import re # print(x) print(', '.join([e+'\n' if i%6 == 5 else e for i,e in enumerate(x)]).replace('\n,',',\n')) # Для вывода результатов в текстовом формате воспользуемся библиотекой tabulate (в таком формате мы будем отправлять результаты в telegram'е) # !pip install tabulate from tabulate import tabulate # Например общее количество случаев заболевания коронавирусом в Великобритании вывести можно так: df[['Country','TotalCases']][df['Country']=='UK'].reset_index(drop=True) x = df[['Country','TotalCases']][df['Country']=='UK'].reset_index(drop=True) print(tabulate(x, headers = x.columns, tablefmt="grid")) # + from enum import Enum class Color(Enum): RED = 1 GREEN = 2 BLUE = 3 # - Color.RED from Vedis import vedis pip install Vedis from Vedis import vedis from vedis import Vedis db = Vedis(':mem:') hash_obj = db.Hash('Test') hash_obj['Hello'] = 'Privet' dct = hash_obj.to_dict() dct dct[b'Hello'] db hash_obj db['Hi'] = 'Prive' db['Hii'] = 'Privee' db db['Words'] = {'Hello':'Привет', 'Hi':'Прив'} db['Words'] db['Words'] >>> h = db.Hash('some key') >>> h['k1'] = 'v1' >>> h.update(k2='v2', k3='v3') h h.to_dict() h.items() h.items()[0] db.hkeys('some key') db.Set('Words') # + import sqlite3 conn = sqlite3.connect(":memory:") #conn.row_factory = sqlite3.Row cursor = conn.cursor() # Создание таблицы cursor.execute("""CREATE TABLE albums (title text, artist text, release_date text, publisher text, media_type text) """) # Сохраняем изменения conn.commit() # Вставляем данные в таблицу cursor.execute("""INSERT INTO albums VALUES ('Glow', '<NAME>', '7/24/2012', 'Xplore Records', 'MP3')""" ) # Сохраняем изменения conn.commit() # Вставляем множество данных в таблицу используя безопасный метод "?" 
albums = [('Exodus', '<NAME>', '7/9/2002', 'Sparrow Records', 'CD'), ('Until We Have Faces', 'Red', '2/1/2011', 'Essential Records', 'CD'), ('The End is Where We Begin', 'Thousand Foot Krutch', '4/17/2012', 'TFKmusic', 'CD'), ('The Good Life', 'Trip Lee', '4/10/2012', 'Reach Records', 'CD')] cursor.executemany("INSERT INTO albums VALUES (?,?,?,?,?)", albums) conn.commit() sql = "SELECT * FROM albums WHERE artist=?" cursor.execute(sql, [("Red")]) print(cursor.fetchall()) # or use fetchone() # -
lect12/12_0_bot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import birdwatcher as bw # **A small test video of a zebra finch is distributed with Birdwatcher** vf = bw.testvideosmall() vf.streammetadata print(f"framecount: {vf.nframes}") print(f"duration: {vf.duration}") # **Iterate of frames in video file. Frames are numpy arrays (height, width, color) of uint8 values,** for frame in vf.iter_frames(): print(frame.shape, end = ', ') # Get a frame by number frame = vf.get_frameat('00:10.') print(frame) # numpy array, uint8 # **Look at frame** from birdwatcher.plotting import imshow_frame # birdwatcher has vizualization tools # %matplotlib inline imshow_frame(frame) from ipywidgets import interact @interact(frameno=(0, vf.framecount-1)) def show_frame(frameno=0): imshow_frame(vf.get_framebynumber(frameno))
notebooks/.ipynb_checkpoints/videofile-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Knn Classifier

import pandas as pd
import seaborn as sns, numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report

# Load the pre-split image feature datasets.
df_train = pd.read_csv("../data/imageTrainDataSet.csv")
df_train.head()

df_test = pd.read_csv("../data/imageTestDataSet.csv")
df_test.head()

df_train.describe()
df_train.info()

# ## Remove target from dataframe
# +
X_train = df_train.drop("Target", axis=1)
y_train = df_train["Target"]

X_test = df_test.drop("Target", axis=1)
y_test = df_test["Target"]

X_train.head()
# -

# ## Normalize dataset
# +
# FIX: the original fitted a *second* MinMaxScaler on the test set, so train
# and test features were rescaled with different min/max ranges and the test
# features no longer matched what the model was trained on.  The test set
# must be transformed with the scaler fitted on the training data.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# -

# ## Create KNN model and get the best "k"
# +
k_value = 0      # best mean CV accuracy seen so far
k_index = 0      # the k that achieved it
k_scores = []    # mean accuracy per k (for plotting)
k_miss = []      # misclassification rate per k

for i in range(1, 100):
    print("Status:", i + 1, "/", "100", end="\r")
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=i),
                             X_train, y_train, cv=5)
    accuracy = scores.mean()
    k_scores.append(accuracy)
    k_miss.append(1 - accuracy)
    if accuracy > k_value:
        k_value = accuracy
        k_index = i

plt.plot(range(1, 100), k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
plt.show()
print('The best k is: ', k_index)
print('The best accuracy is: ', k_value)
# -

# ## Plot confusion matrix to analyse predictions
# +
classifier = KNeighborsClassifier(n_neighbors=k_index)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
cfm = confusion_matrix(y_test, y_pred)
# -

print(classification_report(y_test, y_pred))
print(classifier.score(X_test, y_test))

# FIX: the original called plot.get_figure() *before* `plot` existed
# (NameError) and then saved the same figure a second time afterwards.
# Build the heatmap first, then save the figure once.
plot = sns.heatmap(cfm, cbar=False, annot=True, cmap="Greens", fmt="d")
fig = plot.get_figure()
fig.savefig('cfm.png')
knn/knnClassifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="Oxp4T6aQZHax" outputId="41a76767-27c9-4c94-c571-cf0714bef4f9" 5+3 # + id="p3-ei_czZfwG" x=6 # + colab={"base_uri": "https://localhost:8080/"} id="5B0BSlVJZp08" outputId="9ec0e74c-d319-49f8-b79a-4d6563f3f1e8" print(x) # + id="oVKIZOL3ZvQM" y=5.2 # + colab={"base_uri": "https://localhost:8080/"} id="6JYeJOugZ79F" outputId="0f119ee2-95a4-43b0-87fc-81bd26503071" print(y) # + colab={"base_uri": "https://localhost:8080/"} id="sRaQmobAaAQ0" outputId="fe3e85a7-dd2e-4b40-99d3-0f35eebc3330" x+y # + colab={"base_uri": "https://localhost:8080/"} id="9ye-fzfZaHA0" outputId="90e47b95-81b4-4c03-9e3a-cfd521c2cd68" x-y # + colab={"base_uri": "https://localhost:8080/"} id="P6KHhzUYf6oS" outputId="d9eb5c4c-e9c0-416b-c81c-469aed2bd89a" x=input("what is your mark for edp") # + id="1su-ZjqVgS-A" x=input("what is your mark for edp") if(int(x))
Python_basics_.ipynb
# --- jupytext header (notebook converted to .py, "light" format) ---
# TF1-style character-level LSTM sampler: restores a trained checkpoint and
# generates Chinese couplets one character at a time.

import tensorflow as tf
from tensorflow.contrib import seq2seq  # NOTE(review): imported but unused here
import numpy as np
from matplotlib import pylab as plt
import os
from datasets_creator import Datasets_creator
# %matplotlib inline

pwd  # NOTE(review): leftover Jupyter magic; bare `pwd` is a NameError outside IPython

# Model / sampling hyper-parameters (must match the training configuration).
lstm_units = 256
lstm_layers = 2
max_len = 30
MODEL_PATH = './model_save/model.ckpt'

# Rebuild the vocabulary from the same corpus the model was trained on.
creator = Datasets_creator('../datasets/all_couplets.txt', 20000, max_len)
char2index, index2char = creator.get_chars_dict()
vocabs_size = len(char2index)


def build_inputs():
    """Create the (batch, time) int32 input/label placeholders and the
    scalar keep_prob placeholder."""
    with tf.name_scope('input_placeholders'):
        input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='input')
        label = tf.placeholder(shape=(None, None), dtype=tf.int32, name='label')
        keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')
        return input, label, keep_prob


def build_lstm_cell(num_units, num_layers, keep_prob, batch_size):
    """Stack `num_layers` dropout-wrapped LSTM cells; also return the
    corresponding all-zero initial state for `batch_size` sequences."""
    with tf.name_scope('lstm_cell'):
        def lstm():
            lstm = tf.nn.rnn_cell.BasicLSTMCell(num_units)
            dropout = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
            return dropout
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm() for _ in range(num_layers)])
        init_zero_state = cell.zero_state(batch_size, tf.float32)
        return cell, init_zero_state


def build_lstm_layer(cell, embed_input, init_state):
    """Run the RNN over `embed_input`; return per-step outputs and final state."""
    with tf.name_scope('lstm_layer'):
        outputs, final_state = tf.nn.dynamic_rnn(cell, embed_input, initial_state=init_state)
        return outputs, final_state


def build_forward(cell, input, init_state):
    # One-hot encode the token ids, run the LSTM, then project each step's
    # output to vocabulary logits and a softmax distribution.
    one_hot = tf.one_hot(input, vocabs_size, axis=-1)
    outputs, final_state = build_lstm_layer(cell, one_hot, init_state)
    logits = tf.layers.dense(outputs, vocabs_size, name='fc_layer')
    outputs = tf.nn.softmax(logits)
    return outputs, logits, final_state


def pick_char_from_top_n(preds, vocab_size, top_n=5):
    """Sample one char id from the `top_n` most probable entries of `preds`,
    always excluding ids 0 and 1 (presumably pad/unknown — TODO confirm
    against Datasets_creator)."""
    p = np.squeeze(preds)
    p[1] = 0
    p[0] = 0
    # Zero out everything below the top_n largest probabilities, renormalise,
    # then draw one index from the remaining distribution.
    p[np.argsort(p)[:-top_n]] = 0
    p = p / np.sum(p)
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c


def sample(prime, top_n=5):
    """Generate one couplet: warm the LSTM state on `prime`, then sample one
    character per step until the full-stop character '。' is produced."""
    samples = list(prime)
    tf.reset_default_graph()
    input_pl, label_pl, keep_prob_pl = build_inputs()
    # NOTE(review): build_lstm_cell's signature is (units, layers, keep_prob,
    # batch_size); here the cast batch-size tensor lands in keep_prob and the
    # literal 1 in batch_size. With a batch of 1 this evaluates to keep_prob
    # == 1.0 so it behaves, but the arguments look swapped — confirm against
    # the training script before touching.
    cell_op, init_zero_state_op = build_lstm_cell(lstm_units, lstm_layers, tf.cast(tf.shape(input_pl)[0], tf.float32), 1)
    init_state_op = init_zero_state_op
    outputs_op, _, final_state_op = build_forward(cell_op, input_pl, init_state_op)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, MODEL_PATH)  # load the trained weights
    init_state = sess.run(init_state_op)
    # Feed the prime text through, one character per step, carrying the state.
    for char in prime:
        x = np.zeros((1, 1), dtype=np.int32)
        x[0, 0] = char2index.get(char, 1)  # unknown characters map to id 1
        feed_dict = { input_pl: x, keep_prob_pl: 1, init_state_op: init_state }
        outputs, final_state = sess.run([outputs_op, final_state_op], feed_dict=feed_dict)
        init_state = final_state
    if len(prime) != 0:
        # Pick the first generated character from the prime's last prediction.
        pick_char_index = pick_char_from_top_n(outputs, vocabs_size, top_n)
        samples.append(index2char[pick_char_index])
    else:
        pick_char_index = 0  # empty prime: seed generation with id 0
    # Keep sampling until the sentence-ending character appears.
    while True:
        x = np.zeros((1, 1), dtype=np.int32)
        x[0, 0] = pick_char_index
        feed_dict = { input_pl: x, keep_prob_pl: 1, init_state_op: init_state }
        outputs, final_state = sess.run([outputs_op, final_state_op], feed_dict=feed_dict)
        init_state = final_state
        pick_char_index = pick_char_from_top_n(outputs, vocabs_size, top_n)
        pick_char = index2char[pick_char_index]
        samples.append(pick_char)
        if pick_char == '。':
            break
    sess.close()
    return ''.join(samples)


# Smoke-test with an empty prime, then generate ten couplets from a fixed prime.
sample(prime='', top_n=3)
tf.logging.set_verbosity(tf.logging.ERROR)
for _ in range(10):
    print(sample(prime='卓云科技,', top_n=10))
tensorflow_imp_char/Couplets_Sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First Goal Prediction Models # With the data stored in a easily queryable way, now we can start building some basic models to try to predict: # - The scoreline # - The result (A, D, H) - utilising the scoreline estimates # Will utilise scipy, sklearn and statsmodels along with some useful helper functions in dspy personal repo # + from IPython.display import display, Markdown import datetime as dt import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy.stats import poisson import seaborn as sns import warnings from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import statsmodels.api as sm from epl.dspy_display import statsmodels_pretty_print, pp_conf_matrices, pp_conf_matrix from epl.dspy_preprocess_utils import apply_feature_scaling from epl.dspy_eval_utils import statsmodels_create_eval_df from epl.features_parse import get_feat_col_names from epl.feature_utils import home_away_to_team_opp, create_goal_probs, create_match_prediction_stats, create_poisson_prediction_output, eval_df_to_match_eval_df from epl.query import create_and_query pd.options.display.max_columns = None warnings.filterwarnings('ignore') # - # ## 1. Fetch Data # ### 1a. 
Match Data # Need to import the raw match data and reformat so we have a row per team per match (rather than a row per match with 2 teams) # + # get key cols for join to features along with useful id data and goal/result data match_key_cols = ['Date', 'HomeTeam', 'AwayTeam'] id_cols = ['Country', 'Div', 'Season'] match_other_cols = ['FTHG', 'FTAG', 'FTR'] # for now restrict to only the top div in each country wc = {'Div': ['IN', ['E0', 'SC0', 'B1', 'D1', 'F1', 'I1', 'SP1', 'P1']], 'Season': ['<>', '9394']} # - match_cols = match_key_cols + id_cols + match_other_cols df_matches = create_and_query('matches', cols=match_cols, wc=wc) df_matches.tail(5) # Display how many matches we have per div df_matches[['FTR', 'Div']].groupby(['Div']).count().sort_values(['FTR'], ascending=False) # ### 1b. Create Train/Test Split At Match Level # Before we split into individual team per match format, we need to define train/test split so we estimate goals for complete matches # We do this by splitting pre-conversion and adding an extra col 'train'=True if training, else test # + test_frac = 1/10 seed = 123 train_matches, test_matches = train_test_split(df_matches, test_size = test_frac, random_state=seed, stratify=df_matches['FTR']) train_matches['Train'] = True test_matches['Train'] = False df_matches = pd.concat([train_matches, test_matches]) # - # Now we convert so we have a row per team per match print("Converting match df with {:,} rows of matches".format(len(df_matches))) df_m = home_away_to_team_opp(df_matches) print("Now have df with {:,} rows".format(len(df_m))) df_m.tail() # ### 1b. Feature Data # Now we need to define which features we want # # For now, we will pull in everything we have and then we can form various models below using different iterations of the data df_feats = create_and_query('features', wc=wc) df_feats.tail(5) # ## 2. 
Join Data # Now we have our matches and features, we need to join them together by: # - Joining on for both Team and Opp # - Joining on fixture relevant data i.e. if a team is at Home, join on their recent GFH instead of GFA data and vice versa feat_id_cols = list(set(id_cols + ['Date', 'Team', 'Location'])) feat_cols = [x for x in df_feats.columns if x not in feat_id_cols] print("We have {} feature cols to join for each of team and opp".format(len(feat_cols))) # + feat_join_key = ['Date', 'Team'] feat_cols_team = ['Team'+x for x in feat_cols] df_feats_team = df_feats.rename(columns=dict(zip(feat_cols, feat_cols_team))) team_cols = feat_join_key + feat_cols_team feat_cols_opp = ['Opp'+x for x in feat_cols] df_feats_opp = df_feats.rename(columns=dict(zip(feat_cols, feat_cols_opp))) opp_cols = feat_join_key + feat_cols_opp # - df = pd.merge(left=df_m, right=df_feats_team[team_cols], how='left', left_on=feat_join_key, right_on=feat_join_key) df = pd.merge(left=df, right=df_feats_opp[opp_cols], how='left', left_on=['Date', 'Opp'], right_on=feat_join_key, suffixes=('', '_DROP')) df = df.drop(columns=[x for x in df.columns if x[-5:] == '_DROP']) df.tail() # ## 3. Simple Model - Always Home # Could just create a col of 'H' for comparison with FTR, but will go through the process of: # - Building a Poisson GLM model with only Home as a factor # - Compute the resulting predicted lambdas # - Compute the associated Poisson distributed score distributions per team # - Compute the associated scoreline matrix per match # - Compute the associated probability of each result: (A, D, H) # ### Preprocess # #### 3a. Select and Prepare $X$, $y$ Data # Features will be split into 3: # - Features to be untouched # - Features to be standardised i.e. $\hat{x} = \frac{x - \mu}{\sigma}$ # - Features to be normalised i.e. 
$\hat{x} = \frac{x - min(x)}{max(x) - min(x)}$ # + pred_col = 'GF' train_col = ['Train'] feats = ['Home'] stand_feats = [] norm_feats = [] all_feats = feats + stand_feats + norm_feats df_est = df[[pred_col] + train_col + all_feats].dropna() print('From original df with {:,} obs, have dropped {:,} NaNs'.format(len(df), len(df) - len(df_est))) df_est.tail() # - # #### 3b. Train/Test Split # Given data is grouped as matches, we have done the split earlier in 1b and so here we just utilise that column to split the data # + X = df_est[all_feats] y = df_est[pred_col] x_train = df_est[df_est.Train][all_feats] x_test = df_est[~df_est.Train][all_feats] y_train = df_est[df_est.Train][pred_col] y_test = df_est[~df_est.Train][pred_col] print('Using {:,} cases for training'.format(x_train.shape[0])) print('---') print('Using {:,} cases for testing'.format(x_test.shape[0])) print('---') print('Using following factors to predict: {}'.format(', '.join(all_feats))) # - # #### 3c. Scale Features (if required) x_train, std_scaler, norm_scaler = apply_feature_scaling(x_train, stand_feats, norm_feats) # ### Fit and Inspect Model Params # #### 3d. Fit # Utilise statsmodels [GLM model](https://www.statsmodels.org/stable/glm.html) to estimate a [Poisson Regression](https://en.wikipedia.org/wiki/Poisson_regression) # boolean to decide to add intercept term (sklearn does by default) add_int = True # create a Poisson GLM class poisson_model = sm.GLM(y_train, (sm.add_constant(x_train) if add_int else x_train), family=sm.families.Poisson()) # fit the model model = poisson_model.fit() model_output = statsmodels_pretty_print(model, alpha=0.05, dp=2, model_overview=True, param_sort_cols=['Sig', 'coef']) model_output # #### 3e. 
Inspect # + display(Markdown("Home advantage exists - model says Home Team {:.2f}x more likely to score (and very statistically significant)".format(model_output['coef']['Home']))) display(Markdown("Can compare model outputs to training data averages for verification:")) # - train_avgs = pd.concat([y_train, x_train], axis=1).groupby(['Home']).mean() train_avgs display(Markdown( '''We can see that: - The mean away goals is {:.2f} --> lines up with the 'const' coef - The mean home goals is {:.2f} --> lines up with {:.2f} * {:.2f} = {:.2f} So our simple model matches _on average_ the data it was trained on (a good start) '''.format(train_avgs.values[0][0], train_avgs.values[1][0], model_output['coef']['const'], model_output['coef']['Home'], model_output['coef']['Home'] * model_output['coef']['const']) )) # ### Create Poisson Distributions and Result Predictions # #### 3f. Create Poisson $\lambda$, Scoreline Predictions and FTR Prediction # Below applies the fitted preprocessing transforms (if any) to all the data (train _and_ test) eval_df = statsmodels_create_eval_df('all', model, add_int, X, y, x_train, x_test, y_train, y_test, stand_feats, norm_feats, std_scaler, norm_scaler) eval_df.tail() # create the poisson distribution per team and then form by match eval_df = create_poisson_prediction_output(eval_df, df, ['Country', 'Div', 'Season', 'Train']) eval_df = eval_df.sort_values(['Date', 'Country', 'Div']) eval_df.tail() # ### Evaluate Model # #### 3g. 
Accuracy # Now we have our model predictions and true values together, we can eval # __Accuracy Metrics__ list_of_act_preds = [ {'act': eval_df[eval_df['Train']]['FTR'], 'pred': eval_df[eval_df['Train']]['FTRPred'], 'label': 'Train' }, {'act': eval_df[~eval_df['Train']]['FTR'], 'pred': eval_df[~eval_df['Train']]['FTRPred'], 'label': 'Test' }, {'act': eval_df['FTR'], 'pred': eval_df['FTRPred'], 'label': 'All' } ] pp_conf_matrices(list_of_act_preds, max_val=0.5) # Given we have a simple 1 factor model for 'Home' only it is not surprising: # - We only ever predict home due to the higher lambda for all home games # - Home advantage holds for approx ~46% of games which is the accuracy of the model # # This gives us a baseline --> ~46% is the min required to beat a naive model that only predicts home # #### 3h. Accuracy by Score # Can now try to bucket accuracy by scoreline # + eval_score_df = eval_df[['FTR', 'FTRPred', 'FTHG', 'FTAG']] eval_score_df['Correct'] = eval_score_df['FTR'] == eval_score_df['FTRPred'] acc_by_score = eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).sum() / eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() fig, axes = plt.subplots(ncols=2, figsize=(20,7)) sns.heatmap(pd.pivot_table(acc_by_score, index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[0]); sns.heatmap(pd.pivot_table(eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() / eval_score_df.shape[0], index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[1]); # - # From above: # - Right heatmap shows % of games that end in a given scoreline e.g. 1-1 draws happen 12% of the time # - Left heatmap shows from each scoreline, what % of them we predict correctly # - Clearly we predict all home wins correctly as that is all we ever predict (at the detriment of 0% of draws and away wins) # ## 4. 
Slighlty Less Simple Model - Long Run GF/GA # ### 4a. GF / GA Exploration # Before creating the model, we can check to see whether or not we anticipate historical Goals Scored/For (GF) and Goals Conceded/Against (GA) to have any impact on the scoreline of a game # + actual_cols = ['GF', 'GA'] team_gf = ['TeamAvgGF_'+str(x) for x in [3,5,10,20,40,80]] opp_ga = ['OppAvgGA_'+str(x) for x in [3,5,10,20,40,80]] team_ga = ['TeamAvgGA_'+str(x) for x in [3,5,10,20,40,80]] opp_gf = ['OppAvgGF_'+str(x) for x in [3,5,10,20,40,80]] df_gfga = df[['Date', 'Div', 'Season', 'Team', 'Opp'] + actual_cols + team_gf + opp_ga + team_ga + opp_gf] df_gfga.tail() # - # #### Historical Team GF vs GF # Can plot rolling historical averages vs GF in a game to try and tease out any meaningful correlations # + nc = 3 nr = 2 fig, axes = plt.subplots(ncols=nc, nrows=nr, figsize=(10*nc, 6*nr)) axes = axes.reshape(-1) for gf,a in zip(team_gf[::-1], axes): sns.boxplot(data=df_gfga, x='GF', y=gf, palette='Blues', ax=a, showfliers = False); a.set_title(gf) # - # Takeaways: # - Overall positive correlation across the board --> a higher rolling avg goals scored can only be positive for expected GF # - Stronger positive correlation over a longer time horizon e.g. 80 vs 3 # - This may be because over 3 games you may be a good team but play 3 also very good teams i.e. 
__the historical rolling averages are not taking into account the team they were scored against__ # #### Historical Opp GA vs GF # Can now do the same for how may goals yur opponent has let in on avg over the last $n$ games # + nc = 3 nr = 2 fig, axes = plt.subplots(ncols=nc, nrows=nr, figsize=(10*nc, 6*nr)) axes = axes.reshape(-1) for gf,a in zip(opp_ga[::-1], axes): sns.boxplot(data=df_gfga, x='GF', y=gf, palette='Reds', ax=a, showfliers = False); a.set_title(gf) # - # Takeaways: # - Again, on average a positive correlation across all --> play 'on average' a 'leaky' opponent and you are expected to score more goals # - Again, longer time horizons appear to have a higher correlation than shorter ones # #### Team GA vs GF # Would still expect this to have an impact, but: # - Less so vs Team GF i.e. how does your defence impact your goals scored # - Potentially would vanish once correlation with Team GF taken account of i.e. if you score goals then your opponent less likely to have the ball to score against you # + nc = 3 nr = 2 fig, axes = plt.subplots(ncols=nc, nrows=nr, figsize=(10*nc, 6*nr)) axes = axes.reshape(-1) for gf,a in zip(team_ga[::-1], axes): sns.boxplot(data=df_gfga, x='GF', y=gf, palette='Purples', ax=a, showfliers = False); a.set_title(gf) # - # #### Opp GF vs GF # + nc = 3 nr = 2 fig, axes = plt.subplots(ncols=nc, nrows=nr, figsize=(10*nc, 6*nr)) axes = axes.reshape(-1) for gf,a in zip(opp_gf[::-1], axes): sns.boxplot(data=df_gfga, x='GF', y=gf, palette='Oranges', ax=a, showfliers = False); a.set_title(gf) # - # As expected both 4c and 4d display less correlation but still could be worth including # __Now let's try a basic model of only including long term averages i.e. only 40 and 80 (roughly 1 & 2 prem league seasons worth of results)__ # ### 4b. 
Preprocess # Now let's try and fit various models # + pred_col = 'GF' train_col = ['Train'] feats = ['Home'] stand_feats = [x for x in (team_gf + opp_ga + team_ga + opp_gf) if '40' in x or '80' in x] norm_feats = [] all_feats = feats + stand_feats + norm_feats df_est = df[[pred_col] + train_col + all_feats].dropna() print('From original df with {:,} obs, have dropped {:,} NaNs'.format(len(df), len(df) - len(df_est))) df_est.tail() # - # __Train/Test Split__ # + X = df_est[all_feats] y = df_est[pred_col] x_train = df_est[df_est.Train][all_feats] x_test = df_est[~df_est.Train][all_feats] y_train = df_est[df_est.Train][pred_col] y_test = df_est[~df_est.Train][pred_col] print('Using {:,} cases for training'.format(x_train.shape[0])) print('---') print('Using {:,} cases for testing'.format(x_test.shape[0])) print('---') print('Using following factors to predict: {}'.format(', '.join(all_feats))) # - # __Apply Scaling__ x_train, std_scaler, norm_scaler = apply_feature_scaling(x_train, stand_feats, norm_feats) # ### 4c. Fit Model # boolean to decide to add intercept term (sklearn does by default) add_int = True # create a Poisson GLM class poisson_model = sm.GLM(y_train, (sm.add_constant(x_train) if add_int else x_train), family=sm.families.Poisson()) # fit the model model = poisson_model.fit() model_output = statsmodels_pretty_print(model, alpha=0.05, dp=2, model_overview=True, param_sort_cols=['Sig', 'coef']) model_output # Comments on GLM Regression results: # - Home adv still the most important factor i.e. it still gives 1.36x more likely to score at home # - As _kinda_ demonstrated in the above series of boxplots, historical avgs of GF are most correlated i.e. TeamGF_40 and TeamGF_80 # - All coefficients are of expected sign (TeamGF>1, OppGA>1 and vice versa) and all highly significant (apart from TeamGA_80) # ### 4d. 
Eval eval_df = statsmodels_create_eval_df('all', model, add_int, X, y, x_train, x_test, y_train, y_test, stand_feats, norm_feats, std_scaler, norm_scaler) eval_df.tail() # create the poisson distribution per team and then form by match eval_df = create_poisson_prediction_output(eval_df, df, ['Country', 'Div', 'Season', 'Train']) eval_df = eval_df.sort_values(['Date', 'Country', 'Div']) eval_df.tail() list_of_act_preds = [ {'act': eval_df[eval_df['Train']]['FTR'], 'pred': eval_df[eval_df['Train']]['FTRPred'], 'label': 'Train' }, {'act': eval_df[~eval_df['Train']]['FTR'], 'pred': eval_df[~eval_df['Train']]['FTRPred'], 'label': 'Test' }, {'act': eval_df['FTR'], 'pred': eval_df['FTRPred'], 'label': 'All' } ] pp_conf_matrices(list_of_act_preds, max_val=0.5) # So by adding these long run performance features we: # - Start predicting some away wins(~23% away) # - Still never predict a draw # - __Improve our accuracy vs the simplistic 'H' only model by ~5%__ # + eval_score_df = eval_df[['FTR', 'FTRPred', 'FTHG', 'FTAG']] eval_score_df['Correct'] = eval_score_df['FTR'] == eval_score_df['FTRPred'] acc_by_score = eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).sum() / eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() fig, axes = plt.subplots(ncols=2, figsize=(20,7)) sns.heatmap(pd.pivot_table(acc_by_score, index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[0]); sns.heatmap(pd.pivot_table(eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() / eval_score_df.shape[0], index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[1]); # - # Compared to the H-only model: # - We still predict a large % of the H matches correctly # - We start to predict some of the A matches correctly (at the detriment of 100% for home matches) # - Struggle most to predict low scoring away victories (34% 0-1, 41% 0-2, 34% 1-2) despite 
them being more common in practise # # It's an improvement, but the key to better results seems to lie in: # - Predicting draws # - Predicting low scoring away victories # # Home advantage still seems to have too big an impact on the estimated $\lambda$ that can't be overcome by (currently) included features # ## 5. Even Less Simple Model - Long + Short Run GF/GA # Now we try to incorporate some of the more short term averages in an attempt to incorporate 'form' # # As noted above: # - they appear to have a lower correlation with GF than longer run averages # - this is likely as they are simple linear averages and each goal is not weighted by likelihood of scoring i.e. quality of opposition is not taken into account # ### 5a. Preprocess # Now let's try and fit various models # + pred_col = 'GF' train_col = ['Train'] feats = ['Home'] stand_feats = team_gf + opp_ga + team_ga + opp_gf norm_feats = [] all_feats = feats + stand_feats + norm_feats df_est = df[[pred_col] + train_col + all_feats].dropna() print('From original df with {:,} obs, have dropped {:,} NaNs'.format(len(df), len(df) - len(df_est))) df_est.tail() # - # __Train/Test Split__ # + X = df_est[all_feats] y = df_est[pred_col] x_train = df_est[df_est.Train][all_feats] x_test = df_est[~df_est.Train][all_feats] y_train = df_est[df_est.Train][pred_col] y_test = df_est[~df_est.Train][pred_col] print('Using {:,} cases for training'.format(x_train.shape[0])) print('---') print('Using {:,} cases for testing'.format(x_test.shape[0])) print('---') print('Using following factors to predict: {}'.format(', '.join(all_feats))) # - # __Apply Scaling__ x_train, std_scaler, norm_scaler = apply_feature_scaling(x_train, stand_feats, norm_feats) # ### 4c. 
Fit Model # boolean to decide to add intercept term (sklearn does by default) add_int = True # create a Poisson GLM class poisson_model = sm.GLM(y_train, (sm.add_constant(x_train) if add_int else x_train), family=sm.families.Poisson()) # fit the model model = poisson_model.fit() model_output = statsmodels_pretty_print(model, alpha=0.05, dp=2, model_overview=True, param_sort_cols=['Sig', 'coef']) model_output # Results very similar to long run avg model - little gained # ### 4d. Eval eval_df = statsmodels_create_eval_df('all', model, add_int, X, y, x_train, x_test, y_train, y_test, stand_feats, norm_feats, std_scaler, norm_scaler) eval_df.tail() # create the poisson distribution per team and then form by match eval_df = create_poisson_prediction_output(eval_df, df, ['Country', 'Div', 'Season', 'Train']) eval_df = eval_df.sort_values(['Date', 'Country', 'Div']) eval_df.tail() list_of_act_preds = [ {'act': eval_df[eval_df['Train']]['FTR'], 'pred': eval_df[eval_df['Train']]['FTRPred'], 'label': 'Train' }, {'act': eval_df[~eval_df['Train']]['FTR'], 'pred': eval_df[~eval_df['Train']]['FTRPred'], 'label': 'Test' }, {'act': eval_df['FTR'], 'pred': eval_df['FTRPred'], 'label': 'All' } ] pp_conf_matrices(list_of_act_preds, max_val=0.5) # So by adding these short run performance features we: # - Still predicting some away wins(~23% away) # - Still never predict a draw # - __Accuracy remains unch vs the long run avg only model__ # + eval_score_df = eval_df[['FTR', 'FTRPred', 'FTHG', 'FTAG']] eval_score_df['Correct'] = eval_score_df['FTR'] == eval_score_df['FTRPred'] acc_by_score = eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).sum() / eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() fig, axes = plt.subplots(ncols=2, figsize=(20,7)) sns.heatmap(pd.pivot_table(acc_by_score, index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[0]); 
sns.heatmap(pd.pivot_table(eval_score_df[['Correct','FTHG', 'FTAG']].groupby(['FTHG', 'FTAG']).count() / eval_score_df.shape[0], index='FTHG', columns='FTAG', values='Correct').fillna(0), cmap='Blues', fmt='.2%', annot=True, ax=axes[1]); # - # As per above, very little gained vs the long run only model # # __We need to find a way to incorporate more information on _quality_ of goals into the short run measures so they can more accurately reflect 'form'__
notebooks/First Models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] azdata_cell_guid="1f608a1d-2436-4b48-80d4-5c4d2f8ca7d0" # # Dynamics 365 Business Central Troubleshooting Guide (TSG) - Usage of deprecated web service protocols # # This notebook contains Kusto queries that can help determine if one or more environments are still using web services built on the deprecated OData v3 protocol. Queries go back 30 days. # # NB! The signal used in this notebook is only available in versions 16.1 (or newer) of Business Central, so check the version of your environment if you don't see any data. # # Web service request signal is documented here: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace # + [markdown] azdata_cell_guid="f103fae9-cf6d-40f7-9062-11ce50691046" # ## 1\. Get setup: Load up Python libraries and connect to Application Insights # First you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it? 
<span style="color: rgb(0, 128, 0); font-family: Consolas, &quot;Courier New&quot;, monospace; font-size: 12px; white-space: pre;">Install&nbsp;instructions:&nbsp;https://github.com/microsoft/BCTech/tree/master/samples/AppInsights/TroubleShootingGuides</span>) and connect to your Application Insights resource (get appid and appkey from the API access page in the Application Insights portal) # + azdata_cell_guid="2215d24c-babd-46aa-a4f6-6988159487c7" # load the KQLmagic module # %reload_ext Kqlmagic # + azdata_cell_guid="a253fa8e-6ac2-4722-a00a-1c52aedab4ed" tags=[] # Connect to the Application Insights API # %kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>' # + [markdown] azdata_cell_guid="9ef1220c-d9cc-4552-9297-1428efcafb32" # ## 2\. Define filters # # This workbook is designed for troubleshooting environments in an AAD subscription. Please provide values for aadTenantId below (or use a config file). # + azdata_cell_guid="0a0785f7-a85e-4ccf-9020-732e1d4c058a" tags=[] # TSG filter variables # You can either use configuration file (INI file format) or set filters directly. 
# If you specify a config file, values found in it take precedence over the
# variables set manually below (matching the override logic in the code section).

# config file name and directory (full path)
configFile = "c:/tmp/notebook.ini"

# Add AAD tenant id and environment name here
aadTenantId = "MyaaDtenantId"
environmentName = "MyEnvironment"

# date filters for the analysis
# use YYYY-MM-DD format for the dates (ISO 8601)
startDate = "2020-11-20"
endDate = "2020-11-24"

# Do not edit this code section
import configparser

config = configparser.ConfigParser()
config.read(configFile)  # a missing file simply yields no overrides
# configparser lower-cases option names and exposes the [DEFAULT] section via
# .defaults(); each .get() falls back to the manual value above when the key
# is absent (or when the file was missing/empty), replacing the original
# repetitive has_option()/index chain with identical semantics.
_defaults = config.defaults()
aadTenantId = _defaults.get('aadtenantid', aadTenantId)
environmentName = _defaults.get('environmentname', environmentName)
startDate = _defaults.get('startdate', startDate)
endDate = _defaults.get('enddate', endDate)
# extensionId has no manual default; define it only when the config provides
# one (preserves the original conditional assignment).
if 'extensionid' in _defaults:
    extensionId = _defaults['extensionid']

print("Using these parameters for the analysis:")
print("----------------------------------------")
print("aadTenantId " + aadTenantId)
print("environmentName " + environmentName)
print("startDate " + startDate)
print("endDate " + endDate)

# + [markdown] azdata_cell_guid="5f9b698d-8a7e-4757-b27d-02f219d6c589"
# # Analyze usage of web services
#
# Either click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries
#
# Telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
#
# KQL samples
# * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql
# * https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/DeprecatedWebServiceProtocols.kql

# + [markdown] azdata_cell_guid="810235a1-c202-47a2-b9dc-4d4d1ffd123c"
# ## Request stats (last 30 days)
# The first
report shows statistics for all categories. # # The second report shows statistics only for deprecated categories. # # + azdata_cell_guid="6426410c-182a-486c-8466-228cb9ce3228" tags=[] # %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and timestamp > ago(30d) and customDimensions.aadTenantId == _aadTenantId and (_environmentName == '' or customDimensions.environmentName == _environmentName ) and customDimensions.eventId == 'RT0008' | extend category = tostring( customDimensions.category ) | summarize request_count=count() by category, bin(timestamp, 1d) | render timechart title= 'Number of web service requests by category (ODatav4, ODatav3, SOAP, API)' # + azdata_cell_guid="2da40a82-82ec-44a2-b13b-6086e11cdfb0" # %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and timestamp > ago(30d) and customDimensions.aadTenantId == _aadTenantId and (_environmentName == '' or customDimensions.environmentName == _environmentName ) and customDimensions.eventId == 'RT0008' and customDimensions.category == 'ODataV3' | extend category = tostring( customDimensions.category ) | summarize request_count=count() by category, bin(timestamp, 1d) | render timechart title= 'Number of web service requests by category (deprecated categories)' # + [markdown] azdata_cell_guid="2f9c2d0d-df3c-482b-af58-48416a517117" # ## Web service endpoints using deprecated protocols # # The following endpoints are exposed ad web service endpoints using a deprecated protocol (query shows the first 100) # + azdata_cell_guid="a9e923e9-1d05-4acf-a230-4c5142bc3582" tags=[] # %%kql let _aadTenantId = aadTenantId; let _environmentName = environmentName; traces | where 1==1 and timestamp > ago(30d) and customDimensions.aadTenantId == _aadTenantId and (_environmentName == '' or customDimensions.environmentName == _environmentName ) and customDimensions.eventId == 'RT0008' and customDimensions.category == 'ODataV3' | 
project aadId = tostring( customDimensions.aadTenantId ) , environmentName = tostring( customDimensions.environmentName ) , category = tostring( customDimensions.category ) , endpoint = tostring( customDimensions.endpoint ) | distinct aadId , environmentName , category , endpoint | limit 100
samples/AppInsights/TroubleShootingGuides/D365BC Troubleshooting Guides (TSG)/content/Deprecated-Webservice-protocols-TSG.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Image Formation and Features # ### CS655000 Computer Vision Homework 1 # ### Brief # * Due: Wed, 10/16, 23:59 # * Use Python to complete the homework. # * If you encounter any problem, let’s discuss on iLMS instead of email. # # ## Part 1. Harris Corner Detection # # With the Harris corner detector described in slides (p.79), mark the detected corners on the image. # <img style="float: left;" src="1.PNG" width="60%"> # # ### A. Functions: # * `gaussian_smooth()`: filter images with Gaussian blur. # * `sobel_edge_detection()`: apply the Sobel filters to the blurred images and compute the magnitude and direction of gradient. (You should eliminate weak gradients by proper threshold.) # * `structure_tensor()`: use the gradient magnitude above to compute the structure tensor (second-moment matrix). # * `nms()`: perform non-maximal suppression on the results above along with appropriate threshold for corner detection. # # ### B. Results: # * a. Original image # * i. Gaussian smooth results: 𝜎=5 and kernel size=5 and 10 (**2 images**) # * ii. Sobel edge detection results # * (1) magnitude of gradient (Gaussian kernel size=5 and 10) (**2 images**) # * (2) direction of gradient (Gaussian kernel size=5 and 10) (**2 images**) # (You can choose arbitrary color map to display) # * iii. Structure tensor + NMS results (Gaussian kernel size=10) # * (1) window size = 3x3 (**1 image**) # * (2) window size = 30x30 (**1 image**) # * b. Final results of rotating (by 30°) original images (**1 image**) # * c. Final results of scaling (to 0.5x) original images (**1 image**) # # ### C. Report: # * a. Discuss the results of blurred images and detected edges between different kernel sizes of Gaussian filter. # * b. 
Discuss the difference between 3x3 and 30x30 window sizes of the structure tensor.
# * c. Discuss the effect of non-maximal suppression.
# * d. Discuss the results of the rotated and scaled images. Is the Harris detector rotation-invariant or scale-invariant? Explain the reason.
#
# ### D. Notice:
# * a. You should **NOT** use any functions which can produce the result directly in each step. (`cv2.Sobel`, `cv2.Laplacian`, `cv2.cornerHarris`, `skimg.feature.local_binary_pattern`, etc.)
# * b. Your code should display and output the image results mentioned above.
# * c. You should provide a README file with your execution instructions.
Harris Corner Detection/ReadMe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pokedex2] # language: python # name: conda-env-pokedex2-py # --- # + # Create new Pokemon from rdflib import Graph, Literal, BNode, Namespace from rdflib.namespace import FOAF, RDF g = Graph() # Friend of a Friend (FOAF) is an ontology describing people g.bind("foaf", FOAF) pokedex = Namespace("https://michiganpython.org/pokedex/") pikachu = pokedex.pikachu catezar = BNode() # a GUID is generated name = Literal("Pikachu") age = Literal(24) g.add((pikachu, RDF.type, pokedex.Pokemon)) g.add((pikachu, FOAF.name, name)) g.add((pikachu, FOAF.age, age)) g.add((pikachu, FOAF.knows, catezar)) g.add((catezar, RDF.type, pokedex.Pokemon)) g.add((catezar, FOAF.name, Literal("Catezar"))) # Serialize rdf to Terse RDF Triple Language (Turtle) print(g.serialize()) # + # Load a small rdf file in n-triples format from pprint import pprint path = "demo.nt" g = Graph() g.parse(path) print(len(g)) for statement in g: pprint(statement) # + # Pokedex RDF graph loading in n-triples format from rdflib import Graph path = "pokedex.nt" g = Graph() g.parse(path) print(f"rdflib Graph loaded successfully with {len(g)} triples") # + # Find a triple that represents the Bulbasaur label from rdflib import RDFS, URIRef sub_graph = Graph() bulbasaur = URIRef("http://pokedex.dataincubator.org/pokemon/1") for s, p, o in g.triples((bulbasaur, RDFS.label, None)): print(f"{s} {p} {o}") # + # Load triples related to the first 10 Pokemon sub_graph = Graph() pkm = Namespace("http://pokedex.dataincubator.org/pkm/") poke = "http://pokedex.dataincubator.org/pokemon/" for num in range(1, 10): pokemon = URIRef(f"{poke}{num}") sub_graph += g.triples((pokemon, pkm.colour, None)) sub_graph += g.triples((pokemon, pkm.name, None)) sub_graph += g.triples((pokemon, pkm.type, None)) sub_graph += g.triples((pokemon, RDFS.label, None)) 
print(f"rdflib sub graph loaded successfully with {len(sub_graph)} triples") # + # Convert rdf graph to networkx graph from rdflib.extras.external_graph_libs import rdflib_to_networkx_graph nx_graph = rdflib_to_networkx_graph(g) print(f"Networkx {nx_graph} loaded successfully") # + # Load csv file with Pokemon types import pandas as pd df = pd.read_csv('pokemon-types.csv') df.head() # + # Create networkx graph from Dataframe import networkx as nx nx_type1_graph = nx.from_pandas_edgelist(df, 'Name', 'Type1') filtered_df = df.dropna() nx_type2_graph = nx.from_pandas_edgelist(filtered_df, 'Name', 'Type2') nx_graph = nx.compose(nx_type1_graph, nx_type2_graph) nx.greedy_color(nx_graph) print(nx.info(nx_graph)) # + from pyvis.network import Network nt = Network("800px", "100%", notebook=True) nt.from_nx(nx_graph) nt.show_buttons(filter_=['physics']) nt.show("nx.html")
network-graphs-semantic-web/pokedex-graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="OVzGf91VHb9R" outputId="26351c12-2ee9-49c8-cb3c-893db004bb4a"
# #!/usr/bin/env python
"""5th mandatory exercise for Machine Learning at HS Mannheim: k-nearest neighbor."""

# + id="3Y5AuoLAK_Q6"
__maintainer__ = "<NAME>"

# + id="scV6J7jsGCfe"
import string
import numpy as np
import matplotlib.pyplot as plt
import math
import random
import copy
from enum import Enum
from typing import List, Tuple

# Path of the data file and the k value for kNN
fName = '/spiral.txt'
kValue = 3

# Raw ("vanilla") data points, filled by proc_data()
spiral_data = []


# + id="MWyvyqz7iceb"
#@dataclass
class Node:
    """A single 2-D data point with a class label.

    The constructor expects ``coord = [x, y, label]`` where the label is
    -1 or 1 (encoded as 'blue'/'red' later by the classifier).
    """

    def __init__(self, coord: List[float]) -> None:
        self.x = coord[0]
        self.y = coord[1]
        self.color = coord[2]

    def getX(self) -> float:
        return self.x

    def getY(self) -> float:
        return self.y

    def getColor(self) -> float:
        # NOTE: despite the name this returns the numeric class label
        # from the input file (-1 or 1), not a color string.
        return self.color


# + colab={"base_uri": "https://localhost:8080/"} id="Gux-zkcfr-lm" outputId="8f2cebca-6854-4aa1-e82e-a9d4c4d047f3"
# Parse the raw data file: one "x;y;label" record per line.
def proc_data(path: str, mode: str) -> None:
    """Read *path* and append one Node per line to the global spiral_data."""
    f = open(path, mode)
    for x in f:
        x = x.strip()
        x = x.split(";")
        spiral_data.append(Node([float(i) for i in x]))
    f.close()


# Data input for the raw data (Colab mounts Google Drive first)
if 'google.colab' in str(get_ipython()):
    from google.colab import drive
    drive.mount('/content/drive', force_remount=True)
    proc_data("/content/drive/My Drive%s" % (fName), "r")
else:
    proc_data("%s" % (fName), "r")


# + id="jVzFQJdTHZUd"
class Knn:
    """k-nearest-neighbor classifier over a list of prototype Nodes."""

    def __init__(self, listOfNodes: List['Node'], kValue: int) -> None:
        # FIX: _x, _y and _color were mutable *class* attributes, shared by
        # every Knn instance -- a second classifier would silently inherit
        # and extend the first one's points. They are now per-instance.
        self._x: List[float] = []
        self._y: List[float] = []
        self._color: List[str] = []
        self._data = copy.deepcopy(listOfNodes)
        self._k = kValue
        self.__init_data()

    def __init_data(self) -> None:
        """Cache coordinates and map numeric labels (-1/1) to plot colors."""
        for node in self._data:
            self._x.append(node.getX())
            self._y.append(node.getY())
            if(node.getColor()==-1):
                self._color.append('blue')
            else:
                self._color.append('red')
def add_node(self, newNode: Node) -> None: # Neue Liste für Abstände list_elements = [] # Berechne Abstände der Nodes list_elements = list((self.__get_distance(prototype_node, newNode), prototype_node.getColor()) for prototype_node in spiral_data) # Zugehörige Klasse des neuen Node bestimmen calculated_class = self.__calc_neighbor(list_elements) # neuen Node mit dazugehörige Klasse hinzufügen self._x.append(newNode.getX()) self._y.append(newNode.getY()) self._color.append(calculated_class) # Farbe bestimmen des neuen Node def __calc_neighbor(self, list_elements: Tuple[float, int]) -> str: self.__Sort(list_elements) blue, red = 0,0 for index in range(self._k): if list_elements[index][1]==1: red += 1 else: blue += 1 if (blue > red): return 'blue' else: return 'red' # Listenelemente sortieren def __Sort(self, to_sort: Tuple[float, int]) -> Tuple[float, int]: to_sort.sort(key = lambda x: x[0], reverse=False) return to_sort # Distanz zwischen zwei Nodes berechnen def __get_distance(self, fromNode: Node, toNode: Node) -> List[Node]: return math.sqrt(((fromNode.getX() - toNode.getX()) ** 2) + (fromNode.getY() - toNode.getY()) ** 2) # Visualisierung def visualisation(self) -> None: plt.scatter(self._x, self._y, c=self._color) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 302} id="JgAMFpwubcI5" outputId="14a8401b-0b74-40d0-de92-73fabb217a8e" # create vanilla-data: #dh = Datahandler(fName) # Erstes Bild _____________ k1 = Knn(spiral_data, kValue) #0,01 Schritten x, y = -1.0, 1.0 for y_idx in range(200): y -= 0.01 for x_idx in range(200): x += 0.01 k1.add_node(Node([x, y, 0])) x = -1.0 k1.visualisation() ''' # Zweites Bild _____________ k2 = Knn(spiral_data, 5) #0,01 Schritten x, y = -1.0, 1.0 for y_idx in range(200): y -= 0.01 for x_idx in range(200): x += 0.01 k2.add_node(Node([x, y, 0])) x = -1.0 k2.visualisation() '''
K Nearest Neighbor/KNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python3 # This script takes a sdf file for a set molecules with a SMILES column as input. OpenEye # functionalities are used to convert SMILES format to .mae format of Schrodinger. Schrodinger # Epik is used to generate protomers, tautomers and pKas. from openmoltools import openeye as omtoe, schrodinger import pandas as pd import os import numpy as np from ast import literal_eval from openeye.oechem import * import pickle # + ##### IMPORT STARTING SET OF MOLECULES AS SMILES ##### # Import list of available molecules which were exported from eMolecules website. df = pd.read_csv("./df_eMol_sim_unique_molecules_smiles.smi") df.columns = ["index", "canonical isomeric SMILES", "eMolecules SKU"] initial_number_of_molecules = df.shape[0] print("Starting from isomeric SMILES of {} molecules.".format(initial_number_of_molecules)) # Get SMILES and eMolecules SKU as a dictionary. print("Extracting SMILES and eMolecules SKU from input file...") eMolSKU_smiles_dict = {} #for i in range(initial_number_of_molecules): for i in range(10): smiles = df.loc[i,"canonical isomeric SMILES"] emol_sku = df.loc[i,"eMolecules SKU"] eMolSKU_smiles_dict[emol_sku] = smiles print(eMolSKU_smiles_dict) # Save "eMolecules SKU: canonical isomeric SMILES" dictionary as a pickle file pickle.dump(eMolSKU_smiles_dict, open("eMolSKU_can_iso_smiles_dict.pickle", "wb")) # + ##### CONVERT SMILES TO OEMOL ##### print("Converting SMILES to OEMol...") eMolSKU_oemol_dict = {} for key, value in eMolSKU_smiles_dict.items(): # Create a OEMolBuilder from a smiles string. 
oemol_molecule = omtoe.smiles_to_oemol(smiles=value) eMolSKU_oemol_dict[key] = oemol_molecule print(eMolSKU_oemol_dict) # + ##### GENERATE CHARGED CONFORMERS AND SAVE AS MOL2 FILE ##### mol2_directory_path = "./mol2_files" if not os.path.exists(mol2_directory_path): os.makedirs(mol2_directory_path) print("{} directory created.".format(mol2_directory_path)) print("Generating charged OEMol molecules...") # Dictionary to keep track of failed molecules failed_molecules_dict = {} # Generate charges for an OpenEye OEMol molecule. It will return molecule with OpenEye's recommended AM1BCC # charge selection scheme. for key, value in eMolSKU_oemol_dict.items(): print("Generating conformer for ", key, "...") try: oe_molecule = omtoe.get_charges(value, keep_confs=1) except RuntimeError: print("Conformation generation failed for {}.".format(key)) # Save failed molecule to failed_molecules_dict failed_molecules_dict[key] = value mol2_filename = mol2_directory_path + "/" + str(key) + ".mol2" omtoe.molecule_to_mol2(oe_molecule, tripos_mol2_filename=mol2_filename) print("Mol2 file {} generated.".format(mol2_filename)) print("") print("Conformer generation for {} molecules failed.".format(len(failed_molecules_dict))) # Remove failed molecules from oMolID_oemol_dict dictionary for key, value in failed_molecules_dict.items(): eMolSKU_oemol_dict.pop(key, None) print("{} molecules removed from the list.".format(len(failed_molecules_dict))) # Save dictionary of successful conformers as spickle file pickle.dump(eMolSKU_oemol_dict, open("eMolSKU_oemol_dict.pickle", "wb")) # Save dictionary of failed molecules as confromer generation as a pickle file pickle.dump(failed_molecules_dict, open("failed_molecules_dict.pickle", "wb")) # + ##### RUN EPIK ##### print("Running Epik with sequencial pKa prediction method...") mae_directory_path = "./mae_files" if not os.path.exists(mae_directory_path): os.makedirs(mae_directory_path) print("{} directory created.".format(mae_directory_path)) # 
Sequencial pKa calculation method is used starting form pH 7.0. count=0 for key in eMolSKU_oemol_dict.keys(): print("Running Epik for molecule {} ...".format(key)) mol2_file_path = mol2_directory_path + "/" + str(key) + ".mol2" mae_file_path = mae_directory_path + "/" + str(key) + ".mae" schrodinger.run_epik(mol2_file_path, mae_file_path, max_structures=100, ph=7.0, ph_tolerance=None, tautomerize=True, extract_range=None, max_atoms=150, scan=True) count=count+1 print("Epik calculation for %s out of %s molecules finished."%(count,len(eMolSKU_oemol_dict))) # + ##### CONVERT EPIK OUTPUT (.MAE FILE) TO SDF ##### #sdf_directory_path = "./sdf_files" #if not os.path.exists(sdf_directory_path): # os.makedirs(sdf_directory_path) # print("{} directory created.".format(sdf_directory_path)) #for key in eMolID_oemol_dict.keys(): # mae_file_path = mae_directory_path + "/" + str(key) + ".mae" # sdf_file_path = sdf_directory_path + "/" + str(key) + ".sdf" # # Run Schrodinger's structconvert command line utility to convert mae file to sdf # print("Converting Epik output to SDF for molecule {} ...".format(key)) # schrodinger.run_structconvert(input_file_path = mae_file_path, output_file_path = sdf_file_path) # + ##### RUN PROPLISTER TO EXTRACT PKAS ##### print("Running proplister to extract pKas from Epik output.") # Create a dictionary to store predicted pKas predicted_pKa_dict = {} # Iterate over molecules for key in eMolSKU_oemol_dict.keys(): mae_file_path = mae_directory_path + "/" + str(key) + ".mae" proplister = schrodinger.run_proplister(input_file_path=mae_file_path) # Iterate over properties of each molecule # Record predicted pKa values in a list pKa_list = [] for propkey, value in proplister[0].items(): if propkey.startswith("r_epik_pKa"): pKa = float(value) pKa_list.append(pKa) pKa_list = sorted(pKa_list, key=float) predicted_pKa_dict[key] = pKa_list print("Predicted pKa dictionary: {eMolecules SKU : pKas}") print(predicted_pKa_dict) # + ##### ANALYZE PKA PREDICTIONS TO 
COUNT 3 <= PKAS <= 11 ##### # Create a pandas dataframe to store pKa information df_pKa = pd.DataFrame(list(predicted_pKa_dict.items()), columns=["eMolecules SKU", "predicted pKas"]) df_pKa["pKas in [3,11]"]=np.NaN df_pKa["pKa count in [3,11]"]=np.NaN for i, row in df_pKa.iterrows(): # Count pKas that are within 3-11 interval pKa_in_interval_count = 0 pKas_in_interval = [] pKas = row["predicted pKas"] for pKa in pKas: if (3<= pKa) and (pKa <= 11): pKa_in_interval_count = int(pKa_in_interval_count + 1) pKas_in_interval.append(pKa) df_pKa.loc[i,"pKa count in [3,11]"] = pKa_in_interval_count #print(pKas_in_interval) df_pKa.loc[i,"pKas in [3,11]"] = str(pKas_in_interval) # Flag molecules with pKas that are closer than 1 log unit df_pKa["pKas closer than 1 unit"]=False for index, row in df_pKa.iterrows(): # print(row["pKas in [3,11]"]) pKas = literal_eval(row["pKas in [3,11]"]) if len(pKas)> 1: # The difference between consecutive pKas must be >= 1. If not, we will mark True. for i, pKa in enumerate(pKas[0:(len(pKas)-1)]): pKa_difference = float(pKas[i+1]) - float(pKas[i]) if pKa_difference < 1: df_pKa.loc[index, "pKas closer than 1 unit"]=True else: continue # Add Canonical Isomeric SMILES to dataframe df_pKa["canonical isomeric SMILES"] = np.NAN for i, row in df_pKa.iterrows(): key = row["eMolecules SKU"] smiles = eMolSKU_smiles_dict[key] df_pKa.loc[i,"canonical isomeric SMILES"] = smiles df_pKa.to_csv("df_pKa.csv") #print(df_pKa) df_pKa # + ##### REMOVE COMPOUNDS THAT DON'T HAVE PKAS WITHIN 3-11 INTERVAL ##### df_pKa_interval = df_pKa.loc[df_pKa["pKa count in [3,11]"] >= 1.0].reset_index(drop=True) df_pKa_interval.to_csv("df_pKa_interval_3-11.csv") print("Number of molecules with at least 1 pKa in 3-11 interval: ", df_pKa_interval.shape[0]) ##### REMOVE COMPOUNDS THAT HAVE MORE THAN 4 PKAS WITHIN 3-11 INTERVAL ##### df_pKa_interval = df_pKa_interval.loc[df_pKa_interval["pKa count in [3,11]"] <= 4.0].reset_index(drop=True) 
df_pKa_interval.to_csv("df_pKa_interval_3-11.csv") print("Number of molecules with at most 4 pKa in 3-11 interval: ", df_pKa_interval.shape[0]) #print(df_pKa_interval) df_pKa_interval # + ##### REMOVE COMPOUNDS WITH PKA CLOSER THAN 1 LOG UNIT ##### df_pKa_interval_spread = df_pKa_interval.loc[df_pKa_interval["pKas closer than 1 unit"]==False].reset_index(drop=True) df_pKa_interval_spread.to_csv("df_pKa_interval_3-11_spread.csv") print("Number of molecules with pKa in 3-11 interval and spread*: ", df_pKa_interval_spread.shape[0]) print("* pKa values of each molecule are not closer than 1 log unit.") #print(df_pKa_interval_spread) df_pKa_interval_spread # - print("Done.") # Example to read pickle files # import pickle # dictionary = pickle.load(open("eMolID_can_iso_smiles_dict.pickle", "rb"))
compound_selection/zinc15_eMolecules_similarity_set/20170730_eMolecules_similarity_set_pKa_filter/pKa_filter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Slicer 4.13 # language: python # name: slicer-4.13 # --- # ## Image Processing using SimpleITK # ### Clear scene and load sample data set using SimpleITK # + import JupyterNotebooksLib as slicernb import SimpleITK as sitk import sitkUtils as su # Clear scene slicer.mrmlScene.Clear(False) # Load 3D image using SimpleITK reader = sitk.ImageFileReader() reader.SetFileName("data/MRBrainTumor1.nrrd") image = reader.Execute() # - # ### Display SimpleITK image # + volumeNode = su.PushVolumeToSlicer(image) # Prevent automatic brightness/contrast update to make the processed images easier to compare volumeNode.GetDisplayNode().SetAutoWindowLevel(False) # Set up view layout slicer.app.layoutManager().setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView) # Set up slice views slicer.util.setSliceViewerLayers(background=volumeNode, fit=True) # Show volume rendering slicernb.showVolumeRendering(volumeNode, presetName='MR-Default') slicer.util.resetThreeDViews() # Display views slicernb.ViewDisplay() # - # ### Process image and show results # + blurFilter = sitk.SmoothingRecursiveGaussianImageFilter() blurFilter.SetSigma(3.0) blurredImage = blurFilter.Execute(image) su.PushVolumeToSlicer(blurredImage, targetNode=volumeNode) slicernb.ViewDisplay() # - # ### Process the image using another filter and show results # + blurFilter2 = sitk.CurvatureFlowImageFilter() blurFilter2.SetNumberOfIterations(5) blurFilter2.SetTimeStep(0.15) blurredImage2 = blurFilter2.Execute(image) su.PushVolumeToSlicer(blurredImage2, targetNode=volumeNode) slicernb.ViewDisplay() # - # #### Run this example in your browser using Binder: [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/Slicer/SlicerNotebooks/master?filepath=03_Image_Processing_using_SimpleITK.ipynb)
03_Image_Processing_using_SimpleITK.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + # default_exp common.residue_constants
# -

# # residue_constants
#
# > amino acid residue constants used in AlphaFold.

#hide
from nbdev.showdoc import *

# Amino acids are normally represented as one-letter symbols

#export
# The 20 standard amino acids as one-letter codes (AlphaFold's canonical order,
# which is NOT alphabetical).
restypes = [
    'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
    'S', 'T', 'W', 'Y', 'V'
]

# The number of residue types

#export
restype_num = len(restypes)
assert restype_num == 20

# As we can see, the one-letter symbols are not alphabetically ordered. We can easily get the order of each residue in the sequence by enumerating them

#export
# Map from one-letter code to its index in `restypes`.
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_order

# Sometimes we'd also encounter unknown residues. We'll order them as the last in the queue.

#export
# Unknown residues get the next index after the 20 standard types.
unk_restype_index = restype_num
assert unk_restype_index == 20

# Residues can also be represented as three-letter symbols. we can build a mapping between the two as a dict

#export
# FIX: this cell was missing the `#export` directive, but the exported cells
# below (`restype_3to1`, `resnames`) reference `restype_1to3`, so the
# generated module raised NameError on import.
restype_1to3 = {
    'A': 'ALA', 'R': 'ARG', 'N': 'ASN', 'D': 'ASP', 'C': 'CYS',
    'Q': 'GLN', 'E': 'GLU', 'G': 'GLY', 'H': 'HIS', 'I': 'ILE',
    'L': 'LEU', 'K': 'LYS', 'M': 'MET', 'F': 'PHE', 'P': 'PRO',
    'S': 'SER', 'T': 'THR', 'W': 'TRP', 'Y': 'TYR', 'V': 'VAL',
}

# and the reverse

#export
restype_3to1 = {v: k for k, v in restype_1to3.items()}
restype_3to1

# Sometimes we'll also encounter unkown amino acids, we'll use the same one-letter symbol `X`, or three-letter symbol `UNK` to represent them all

#export
unk_restype = 'UNK'

# we can now collect all the three-letter symbols, together with the unknow, into a list of residue names

#export
# All 21 residue names: the 20 standard three-letter codes plus 'UNK'.
resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resnames
00_common.residue_constants.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bash # language: bash # name: bash # --- # ## [Writing an Echo Server](https://netty.io/wiki/user-guide-for-4.x.html#writing-an-echo-server) mkdir -p src/main/java/io/netty/example/echo # + cat << EOF > src/main/java/io/netty/example/echo/EchoServerHandler.java package io.netty.example.echo; import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.util.ReferenceCountUtil; /** * Handles a server-side channel. */ public class EchoServerHandler extends ChannelInboundHandlerAdapter { // (1) @Override public void channelRead(ChannelHandlerContext ctx, Object msg) { // (2) try { // Echo the received data. ctx.write(msg); } finally { // ReferenceCountUtil.release(msg); // (3) } } @Override public void channelReadComplete(final ChannelHandlerContext ctx) { ctx.flush(); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { // (4) // Close the connection when an exception is raised. cause.printStackTrace(); ctx.close(); } } EOF # + cat << EOF > src/main/java/io/netty/example/echo/EchoServer.java package io.netty.example.echo; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; /** * Echoes any incoming data. 
*/ public class EchoServer { private int port; public EchoServer(int port) { this.port = port; } public void run() throws Exception { EventLoopGroup bossGroup = new NioEventLoopGroup(); // (1) EventLoopGroup workerGroup = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); // (2) b.group(bossGroup, workerGroup) .channel(NioServerSocketChannel.class) // (3) .childHandler(new ChannelInitializer<SocketChannel>() { // (4) @Override public void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(new EchoServerHandler()); } }) .option(ChannelOption.SO_BACKLOG, 128) // (5) .childOption(ChannelOption.SO_KEEPALIVE, true); // (6) // Bind and start to accept incoming connections. ChannelFuture f = b.bind(port).sync(); // (7) // Wait until the server socket is closed. // In this example, this does not happen, but you can do that to gracefully // shut down your server. f.channel().closeFuture().sync(); } finally { workerGroup.shutdownGracefully(); bossGroup.shutdownGracefully(); } } public static void main(String[] args) throws Exception { int port = 8888; if (args.length > 0) { port = Integer.parseInt(args[0]); } new EchoServer(port).run(); } } EOF # - # ### Build # #### Add Netty to pom.xml # # ``` bash # cat pom.xml # ``` # # ``` xml # ... # <!-- https://mvnrepository.com/artifact/io.netty/netty-all --> # <dependency> # <groupId>io.netty</groupId> # <artifactId>netty-all</artifactId> # <version>4.1.70.Final</version> # <scope>compile</scope> # </dependency> # ... 
# ``` # + cat << EOF > pom.xml <?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>io.netty.example.echo</groupId> <artifactId>an-official-demo</artifactId> <version>1.0-SNAPSHOT</version> <name>an-official-demo</name> <url>https://seii-saintway.github.io/</url> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <maven.compiler.source>1.8</maven.compiler.source> <maven.compiler.target>1.8</maven.compiler.target> </properties> <dependencies> <!-- https://mvnrepository.com/artifact/io.netty/netty-all --> <dependency> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> <version>4.1.70.Final</version> <scope>compile</scope> </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>4.11</version> <scope>test</scope> </dependency> </dependencies> <build> <plugins> <plugin> <artifactId>maven-clean-plugin</artifactId> <version>3.1.0</version> </plugin> <plugin> <artifactId>maven-resources-plugin</artifactId> <version>3.0.2</version> </plugin> <plugin> <artifactId>maven-compiler-plugin</artifactId> <version>3.8.0</version> <configuration> <excludes> <exclude>**/.ipynb_checkpoints/*.java</exclude> </excludes> </configuration> </plugin> <plugin> <artifactId>maven-surefire-plugin</artifactId> <version>2.22.1</version> </plugin> <plugin> <artifactId>maven-jar-plugin</artifactId> <version>3.0.2</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>3.3.0</version> <configuration> <archive> <manifest> <mainClass>io.netty.example.echo.EchoServer</mainClass> </manifest> </archive> <descriptorRefs> <descriptorRef>jar-with-dependencies</descriptorRef> </descriptorRefs> </configuration> <executions> 
<execution> <phase>package</phase> <goals> <goal>single</goal> </goals> </execution> </executions> </plugin> </plugins> </build> </project> EOF # - mvn clean mvn package # + # jar -tf target/an-official-demo-1.0-SNAPSHOT-jar-with-dependencies.jar # - # ### Run java -jar target/an-official-demo-1.0-SNAPSHOT-jar-with-dependencies.jar # ### Test telnet localhost 8888
an-official-demo/1-Writing-an-Echo-Server.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark # language: '' # name: pysparkkernel # --- # # EMR Notebook Sample # # This is notebook shows how EMR notebooks can make it easy to do development and analytics with PySpark. # # The dataset is an export of the Consumer Financial Protection Bureau database # See https://www.consumerfinance.gov/ and specifically http://files.consumerfinance.gov/ccdb/complaints.csv.zip # # After the session, if you'd like to use this data please get your OWN copy by unzipping the original into S3 bucket in your own AWS account. # # Setup Notes # - EMR 5.32+ requires Spark, Livy and JupyterEnterpriseGateway packages # - To use this notebook, your cluster should have been launched with nltk-bootstrap.sh script # - You cluster needs EBS volume increase. Suggest 15GB # input_data_in_s3 = "s3://heiwad-transfer/data-sets/cfpb-complaints.csv" # + # Direct load data from S3 # https://stackoverflow.com/questions/40413526/reading-csv-files-with-quoted-fields-containing-embedded-commas df = spark.read.load(input_data_in_s3, # please get your own copy after the session format="csv", sep=",", inferSchema="true", header="true", quote = '"', escape='"') df.printSchema() # + #Try simple aggregation to find top companies represented in the data set res = df.groupby("Company").count().orderBy('count',ascending=False) res.show(20) # + # get just the complaints column and simplify the column name complaints = df.select("Consumer complaint narrative").withColumnRenamed("Consumer complaint narrative","text") # Let's sample some of the text for this column for complaint in complaints.head(8): if complaint['text']: print('* ' + complaint['text'] + '\n') # - # # Let's Find out what they are complaining about # # Let's start with starting to count the words represented in the data. 
But not all words are useful so we'll filter out the common words that can be omitted from a sentence and still have it make some sense. # # These *stop words* will have high counts and aren't very useful for NLP so we will filter them out. # # The NLP library, including stop-words dictionary, was installed via bootstrap script on all nodes in the cluster. # + # Note - stopwords dictionary was installed via bootstrap script from nltk.corpus import stopwords stop_words = set(stopwords.words('english')) #--Ignore word fragments from suppressing PII in the data set stop_words.add('xx') stop_words.add('xxxx') stop_words # + # Tokenize via regex - very quick, could be better. import re def emitWords(row): if row['text']: words = [] tokens = re.split('\W+',row['text'].lower()) for token in tokens: stripped = token.strip("$.,1234567890\\/';{}~!?-") if stripped and (stripped not in stop_words): words.append(stripped) return words else: return [] # test - see how emit words parses the following sentence (code local to leader node) emitWords({'text':"running. $949 . can't stop. won't stop? runners run on runs"}) # + # We can apply functions that change the shape of data by applying flatMap on the underlying rdd. This is a 'map-reduce' style operation on Spark counts = complaints.rdd.flatMap(emitWords) \ .map(lambda word: (word, 1)) \ .reduceByKey(lambda a, b: a + b) top_words = counts.top(15, key=lambda x: x[1]) top_words # - # EMR Notebooks can install Python packages on the leader node. This is useful for viewing or charting data. 
# # https://aws.amazon.com/blogs/big-data/install-python-libraries-on-a-running-cluster-with-emr-notebooks/ # # list installed packages sc.list_packages() sc.install_pypi_package("pandas==1.2.5") sc.install_pypi_package("matplotlib") import matplotlib import pandas as pd import matplotlib.pyplot as plt top_words_pd = pd.DataFrame(top_words,columns=['words','count']) # + top_words_pd.sort_values(by='count').plot.barh(x='words', y='count', rot=0,figsize=(10,10)) # Use Jupyter Magic to show the plot # %matplot plt # - # Try checking the frequency of words you thought would be common below by replacing "happy" with anything else. counts.filter(lambda x: "happy" == x[0]).collect() # ## Using custom python libraries installed on the cluster # # The previous language model is very simple (regex). Many english words have various versions that mean more or less the same thing. If we want to break the words (run vs runs) and make sure the root is always a word this is called lemmatization. We can use language models like NLTK. # + # Apply lemmatization to the keys to combine counts for words that mean the same thing # language model installed on cluster via bootstrap action from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() def lemmatize_record(record): (word, count) = record return (lemmatizer.lemmatize(word),count) # See lemmatiation in action... words = [('go',1),('goes',1), ('run',1), ('runs',1)] for record in words: lemma =lemmatize_record(record) print (f"{record[0]} becomes {lemma[0]}") # - # Try lemmatization on the word counts and then compare if the top words have changed. 
# +
combined = counts.map(lemmatize_record).reduceByKey(lambda a, b: a + b)
top_words_combined = combined.top(15, key=lambda x: x[1])

top_words_combined_pd = pd.DataFrame(top_words_combined,columns=['words','count'])

# Left-join the regex-only counts against the lemmatized counts so the two
# can be compared side by side in one horizontal bar chart.
both_pd = pd.merge(
    top_words_pd,
    top_words_combined_pd,
    how="left",
    on='words',
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    sort=False,
    suffixes=("_regex", "_lemmatized"),
    copy=True,
    indicator=False,
    validate=None,
)
both_pd.set_index('words').sort_values(by='count_regex',ascending = True).plot.barh(figsize=(10,10))
# %matplot plt
# -

# Optional: Specify S3 bucket in your own account if you'd like to save the results back to Amazon S3

# +
# Export Results back to S3
output_s3_bucket_name = "<bucket_name>"  # just the bucket name
output_path = "complaints"

# Bug fix: this f-string previously interpolated the undefined names
# `s3_bucket_name` and `path` (NameError at runtime); use the variables
# actually defined above.
s3_out = f"s3://{output_s3_bucket_name}/{output_path}"
combined.saveAsTextFile(s3_out)
# -
cfpb-complaints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- #hide #skip # %config Completer.use_jedi = False # upgrade fastrl on colab ! [ -e /content ] && pip install -Uqq fastrl['dev'] pyvirtualdisplay && \ apt-get install -y xvfb python-opengl > /dev/null 2>&1 # NOTE: IF YOU SEE VERSION ERRORS, IT IS SAFE TO IGNORE THEM. COLAB IS BEHIND IN SOME OF THE PACKAGE VERSIONS # hide from fastcore.imports import in_colab # Since colab still requires tornado<6, we don't want to import nbdev if we don't have to if not in_colab(): from nbverbose.showdoc import * from nbdev.imports import * if not os.environ.get("IN_TEST", None): # assert IN_NOTEBOOK assert not IN_COLAB assert IN_IPYTHON else: # Virtual display is needed for colab from pyvirtualdisplay import Display display = Display(visible=0, size=(400, 300)) display.start() # + # default_exp data.block # - # export # Python native modules import os from collections import deque from copy import deepcopy from time import sleep # Third party libs from fastcore.all import * from fastai.torch_basics import * from fastai.data.all import * from fastai.basics import * from fastai.callback.all import * from torch.utils.data import Dataset from torch import nn import torch import gym import numpy as np # Local modules from fastrl.core import * from fastrl.callback.core import * from fastrl.agent import * # Test imports import pybulletgym # # Data Block Simple # > Stripped down simpler environment execution code. 
# Development of this was helped by [IterableData documentation on multiple workers](https://github.com/pytorch/pytorch/blob/4949eea0ffb60dc81a0a78402fa59fdf68206718/torch/utils/data/dataset.py#L64) # # This code is heavily modifed from https://github.com/Shmuma/ptan # # Reference for env [semantics related to vectorized environments](https://github.com/openai/universe/blob/master/doc/env_semantics.rst) # # Useful links: # - [torch multiprocessing](https://github.com/pytorch/pytorch/blob/a61a8d059efa0fb139a09e479b1a2c8dd1cf1a44/torch/utils/data/dataloader.py#L564) # - [torch worker](https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/worker.py) # # This notebook walks through a more advanced usage of the `Loop` class. # exports def parse_events(loop:L): return loop.filter(lambda s:in_('event.',s)).map(lambda s:s.replace('event.','')) # ## Source # > The base iterable used for iterating through environments. # + # exports _loop=L(['event.after_create','Start Setup','event.initialize','End Setup', 'event.before_episodes', 'Start Episodes', 'event.reset', 'event.do_action', 'event.do_step', 'event.render', 'event.history', 'End Episodes', 'event.after_episodes' ]) mk_class('source_events', **parse_events(_loop).map_dict(), doc="All possible events as attributes to get tab-completion and typo-proofing") _all_=['source_events'] # + # exports class Source(Loop): _loop=_loop _events=source_events _default='source' end_event=parse_events(_loop)[-1] @delegates(Loop) def __init__(self,cbs=None,**kwargs): super().__init__(cbs=cbs,**kwargs) store_attr(but='cbs') self.idx=0 def after_create(self): self('initialize') return self def __iter__(self): while True: self.idx+=1 self('do_step') yield {'this':torch.full((1,5),self.idx)} # exports _loop=L(['event.after_create','Start Setup','event.initialize','End Setup', 'event.before_episodes', 'Start Episodes', 'event.reset', 'event.do_action', 'event.do_step', 'event.do_render', 'event.before_history', 
'event.history', 'event.after_history', 'End Episodes', 'event.after_episodes' ]) mk_class('source_events', **parse_events(_loop).map_dict(), doc="All possible events as attributes to get tab-completion and typo-proofing") _all_=['source_events'] # exports def return_data(o:dict): source,data=o['source'],o['history'] return {'source':source,'history':data} class Source(Loop): _loop=_loop _events=source_events _default='source' end_event=parse_events(_loop)[-1] @delegates(Loop) def __init__(self,cbs=None,test_mode=False,**kwargs): self.idx=0 self.data_fields='state,next_state,done,all_done,env_id,worker_id'\ ',action,episode_id,accum_rewards,reward,step,image'.split(',') self.ignore_fields=[] self.test_field=torch.full((1,5),self.idx) self.return_fn=return_data self.loop_history_yield=False store_attr(but='cbs',state=None,next_state=None,done=None,all_done=None, env_id=0,action=None,episode_id=0,accum_rewards=0,reward=0,step=0, skip_history_return=False) super().__init__(cbs=cbs,**kwargs) def after_create(self): self('initialize') return self def _history(self): self.loop_history_yield=False self('history') if self.test_mode: self.this=torch.full((1,5),self.idx) if 'test_field' not in self.data_fields: self.data_fields.append('test_field') return self.return_fn(dict(source=self,history=self.data()))['history'] def data(self)->BD: return BD({s:(ifnone(getattr(self,s,None),TensorBatch([[0]])) if self.test_mode else getattr(self,s)) for s in self.data_fields if not in_(s,self.ignore_fields)}) def __iter__(self): self('before_episodes') while True: self.idx+=1 self('reset') self('do_action') self('do_step') if self.test_mode: self.test_field=torch.full((1,5),self.idx) self('do_render') self('before_history') if not self.skip_history_return: yield self._history() while self.loop_history_yield: yield self._history() self('after_history') # - # So the `Source` object does a simple loop that returns a dictionary. 
# This is going to be similar to what the rest of fastrl will be expecting. source=Source(test_mode=True) for x,_ in zip(iter(source),range(10)): print(x) Source(test_mode=True).show_loop() # ## Base PyTorch # This section covers the basic dataloader in pytorch with the source object. from torch.utils.data import DataLoader source=Source() try: print(list(DataLoader(iter(source),batch_size=10))) except TypeError as e: print(e) # Ok so the initial attempt failed. This is because we need to indicate this is an iterable dataset # that contains `items` that are each the `Source` instance. Ok so lets make this an # iterable dataset. # exports class SourceDataset(IterableDataset): "Iterates through a `source` object. Allows for re-initing source connections when `num_workers>0`" def __init__(self,source=None): self.source=source def __iter__(self): source=iter(self.source) yield from source # export class VerboseChecked(LoopCallback): _methods=source_events def __init__(self,show_worker_id=True,show_env_id=True): store_attr() def initialize(self): self.source.data_fields=['worker_id'] worker_id=get_worker_info() self.source.worker_id=TensorBatch([[worker_id.id]]) if worker_id is not None else None if self.show_worker_id: print('Worker id: ',worker_id) self.env=gym.make('HumanoidPyBulletEnv-v0') self.env.reset() def do_step(self): if self.show_env_id: print('Env Id: ',id(self.env)) from torch.utils.data import DataLoader,IterableDataset source=Source(test_mode=True,cbs=VerboseChecked) source.after_create() dataset=SourceDataset(source) for x in DataLoader(dataset,batch_size=3): print(str(x).replace(' ','').replace('\n','')) break source=Source(test_mode=True,cbs=VerboseChecked) source.after_create() dataset=SourceDataset(source) for x,i in zip(DataLoader(dataset,batch_size=2,num_workers=0),range(6)): print(str(x).replace(' ','').replace('\n','')) # ## Fastai Compatibility # Now lets get this working with the fastai API! 
from fastai.data.all import * # ### TfmdList Compatability # > Note: First issue we run into: It would be nice to leverage the transform API and the TfmdLists # would be great in case we want to execute transforms on the returned items. In this case, # we want to forgo the `SourceDataset` since we want to use the `TfmdList`s instead. # > Note: Additional note, I wonder what the real difference is between a `TfmdList` and a `Dataset`? source=Source(test_mode=True,cbs=VerboseChecked) # dataset=SourceDataset(source) # So from the looks of the config below, this should be fine right? We have an iterable dataset, # so we indicate that it is not indexed, and that shuffling wouldn't make sense. # # There is some strange things we need to do to actually make this work with defaults. We need to do `type_tfms` # on the items since they need to be iterables. We then need to do `item_tfms` to tell fastai that it is supposed # to try to iterate through these are opposed to simply "pushing" them through the tfm pipeline. # # Let's see if this works! try: dls=DataBlock( blocks=TransformBlock( dls_kwargs={'indexed':False,'shuffle':False}), ).dataloaders([source],n=15,bs=10,num_workers=0,verbose=True) for x in dls[0]:print(x) except TypeError as e: print(e) # Oops! Seems like instead of iterating through the item, it is just passing the item into the collation mechanism. # Let's manually make the items iterable! try: dls=DataBlock( blocks=TransformBlock( type_tfms=[lambda o: o.after_create(),lambda o:iter(o)], dls_kwargs={'indexed':False,'shuffle':False}), ).dataloaders([source],n=15,bs=10,num_workers=0,verbose=True) for x in dls[0]:print(x) except TypeError as e:print(e) # Ok,ok lets also tell it to pull items out of the generator... 
dls=DataBlock( blocks=TransformBlock( type_tfms=[lambda o: o.after_create(),lambda o:iter(o)], item_tfms=lambda o:next(o), dls_kwargs={'indexed':False,'shuffle':False}), ).dataloaders([source],n=15,bs=10,num_workers=0,verbose=True) for x in dls[0]:print(x) # Huh... It only loops once since there is only 1 "item" in the list. This is not desirable behavior since this is # an iterable. The number of loops should be able to be arbitrarily defined via `n` and `bs` especially if the items # don't have a length to them. There are a couple additional worries that I have: # - We may not want to call `iter` of the items until they are loaded onto a worker/passed to a process. This is due to # many/all iterable sources not being picklable. The `type_tfms` might do this too early. # - Why do we need to define the `item_tfms` above in the first place? the dataloader should understand that the # item is iterable to just pull from it? # # You might wonder why we can't just pass a source directly into the `DataBlock`, however anything passed needs to have a len... try: dls=DataBlock( blocks=TransformBlock( type_tfms=[lambda o: o.after_create(),lambda o:iter(o)], item_tfms=lambda o:next(o), dls_kwargs={'indexed':False,'shuffle':False}) ).dataloaders(source) except TypeError as e: print(e) # Ok... so how do get `dls` to iterate more than just the number of items? Well, the first issue is... # + # TfmdLists.__iter__?? # - # And... # + # Datasets.__iter__?? # - # So even though we have gone through the work to indicate that these items and the overall dataset does not have a length, # we still are constrained by a `len` call. This seems to be a fundamental issue with the `TfmdLists`. Maybe we can trick it into # thinking there are `n` items when there really is only one... 
# + # exports from itertools import cycle class IterableTfmdLists(TfmdLists): def __iter__(self): return cycle(self[i] for i in range(len(self))) class IterableDatasets(Datasets): def __iter__(self): return cycle(self[i] for i in range(len(self))) # - # Now that we have a custom `TfmdLists` that will cycle through all the items, we need to modfy # `DataBlock` and `Datasets` to accept these. # + # exports class IterableDataBlock(DataBlock): tl_type = TfmdLists datasets_type = Datasets @delegates(DataBlock) def __init__(self,blocks=None,datasets_type=None,n_inp=None,**kwargs): blocks = L(self.blocks if blocks is None else blocks) for b in blocks: if getattr(b, 'tl_type', None) is not None: self.tl_type = b.tl_type if datasets_type is not None: self.datasets_type=datasets_type if (not is_listy(blocks) or len(blocks)==1) and n_inp is not None: n_inp=0 super().__init__(blocks=blocks,n_inp=n_inp, **kwargs) def datasets(self, source, verbose=False): self.source = source ; pv(f"Collecting items from {source}", verbose) items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose) splits = (self.splitter or RandomSplitter())(items) # pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose) return self.datasets_type(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose, tl_type=self.tl_type) @patch def __init__(self:Datasets, items=None, tfms=None, tls=None, n_inp=None, dl_type=None,tl_type=TfmdLists, **kwargs): super(Datasets,self).__init__(dl_type=dl_type) self.tls = L(tls if tls else [tl_type(items, t, **kwargs) for t in L(ifnone(tfms,[None]))]) self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1)) class TransformBlock(): "A basic wrapper that links defaults transforms for the data block API" def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dls_kwargs=None, tl_type=None): self.type_tfms = L(type_tfms) self.item_tfms = ToTensor + 
L(item_tfms) self.batch_tfms = L(batch_tfms) self.dl_type,self.dls_kwargs = dl_type,({} if dls_kwargs is None else dls_kwargs) self.tl_type = tl_type # - # So we have modified the fastai data block API to handle custom `TfmdList`s, let's try these out! def mutli_recreate(): worker_id=get_worker_info() if worker_id is not None: print('reinit',worker_id.id) print(worker_id) [worker_id.dataset.d.items[i].after_create() for i in range(len(worker_id.dataset.d.items))] source=Source(test_mode=True) block=IterableDataBlock( datasets_type=IterableDatasets, get_items=lambda o:[j.after_create() for j in o], splitter=lambda o:[[0]], blocks=TransformBlock( type_tfms=lambda o:iter(o), item_tfms=lambda o:next(o), tl_type=IterableTfmdLists, dls_kwargs={'indexed':False,'shuffle':False,'persistent_workers':True,'pin_memory':True}), ) dls=block.dataloaders([source],n=10,bs=3,num_workers=2,verbose=True,wif=mutli_recreate) for x in dls[0]:print(x) for x in dls[0]:print(x) # ## General Learner Changes # Since our models might be more complicated than simply `self.prod=self.model(*self.xb)`, we want to be able to skip this. # export @patch def _do_one_batch(self:Learner): if not self.delay_pred: self.pred = self.model(*self.xb) self('after_pred') if len(self.yb): self.loss_grad = self.loss_func(self.pred, *self.yb) self.loss = self.loss_grad.clone() self('after_loss') if not self.training or not len(self.yb): return self('before_backward') self.loss_grad.backward() self._with_events(self.opt.step, 'step', CancelStepException) self.opt.zero_grad() # ### Multi Epoch Iteration Issues # Great! Its iterating! But it seems to reset every iteration for some reason, i.e. It should just keep counting up... dls=block.dataloaders([source],n=10,bs=3,num_workers=0,verbose=True,wif=mutli_recreate) for x in dls[0]:print(x) for x in dls[0]:print(x) # You will notice that the culprit seems to be related to whether the dataloader is # doing multiprocessing or not. 
Interestingly, it seems that persistent workers does not # work (?) # # > Important: Note above # # This is because of the line... # + # DataLoader.__iter__?? # - # You will notice that the loader is redefined per iter. This will happen per epoch then. # Since the workers are tied to a dataloader, it may persist the worker between batches, # however it will not persist them between epochs. This is undesirable. We can try to fix this through by changing how loaders are # handled if persistant worker is set to true. You will find that this does not fix the issue due to core pytorch issues that will be illistrated later... # ### Custom collater's # Finally, we need to change how collation is handled. Since we are returning batch-wise # dictionaries, we want to stack them. You will notice that the [[1,1,1,1]] get turned into [[[1,1,1,1]]]. # We don't want this, so lets change it! # + # export from torch.utils.data._utils.collate import default_collate_err_msg_format,int_classes,string_classes,container_abcs from torch.utils.data._utils.collate import * def vstack_collate(batch): "99% similar to default_collate, however vstacks tensors thus assuming they already have a batch dim" elem = batch[0] elem_type = type(elem) if isinstance(elem, torch.Tensor): out = None if torch.utils.data.get_worker_info() is not None: # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = elem.storage()._new_shared(numel) out = elem.new(storage) return torch.vstack(batch, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap': # array of string classes and object if np_str_obj_array_pattern.search(elem.dtype.str) is not None: raise TypeError(default_collate_err_msg_format.format(elem.dtype)) return vstack_collate([torch.as_tensor(b) for b in batch]) elif 
elem.shape == (): # scalars return torch.as_tensor(batch) elif isinstance(elem, float): return torch.tensor(batch, dtype=torch.float64) elif isinstance(elem, int_classes): return torch.tensor(batch) elif isinstance(elem, string_classes): return batch elif isinstance(elem, container_abcs.Mapping): return {key: vstack_collate([d[key] for d in batch]) for key in elem} elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple return elem_type(*(default_collate(samples) for samples in zip(*batch))) elif isinstance(elem, container_abcs.Sequence): # check to make sure that the elements in batch have consistent size it = iter(batch) elem_size = len(next(it)) if not all(len(elem) == elem_size for elem in it): raise RuntimeError('each element in list of batch should be of equal size') transposed = zip(*batch) return [vstack_collate(samples) for samples in transposed] raise TypeError(default_collate_err_msg_format.format(elem_type)) # + # exports _collate_types = (ndarray, Tensor, typing.Mapping, str) def fr_collate(t): "A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s" b = t[0] return (vstack_collate(t) if isinstance(b, _collate_types) else type(t[0])([fr_collate(s) for s in zip(*t)]) if isinstance(b, Sequence) else vstack_collate(t)) # - # If the `num_worker > 0` and `persistent_workers==True`, then we need to have the loaders be re-dfined outside # of the __iter__ method. # + # exports from fastai.data.load import _loaders class IterableTfmdDL(TfmdDL): def __init__(self, dataset,bs=64, shuffle=False, num_workers=None, verbose=False, do_setup=True,**kwargs): super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers,**kwargs) self._loader=None def create_batch(self, b): return (fr_collate,fa_convert)[self.prebatched](b) # - # ### Pytorch persistent worker Limitation # > This is probably the worst part of the RL <-> Pytorch issue. 
# I thought that:
#
# - If self.num_workers > 0
# - And fake_l.persistent_workers==True
#
# All I needed to do was make sure fastai doesn't destroy dls if these above cases were
# true. The below code that would be added to `IterableTfmdDL` would have fixed this.
#
# ```python
#     def sample(self):
#         return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
#
#
#     def __iter__(self):
#         self.randomize()
#         self.before_iter()
#         self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
#         if self.fake_l.num_workers>0 and self.fake_l.persistent_workers and self._loader is None:
#             self._loader=_loaders[self.fake_l.num_workers==0](self.fake_l)
#         for b in ifnone(self._loader,_loaders[self.fake_l.num_workers==0](self.fake_l)):
# #         for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
#             if self.device is not None: b = to_device(b, self.device)
#             yield self.after_batch(b)
#         self.after_iter()
#         if hasattr(self, 'it'): del(self.it)
# ```
#
# This doesn't fix the issue.
#
# Per https://pytorch.org/docs/stable/data.html:
#
#     persistent_workers (bool, optional) – If True, the data loader will not shutdown the
#                                           worker processes after a dataset has been consumed once.
#                                           This allows to maintain the workers Dataset instances
#                                           alive. (default: False)
#
# Iterable datasets don't get "consumed" based on whether an environment is done. They can be consumed
# if the dataset reaches an arbitrary length. This means that an agent might be in the middle of
# executing an episode, and the dataset will end, and the next epoch will start from scratch.
#
# This is really bad **unless** the user does not use multiprocessing at all, or
# we make "n" really really big, so that we can get a few full episodes completed.
#
# Overall, it would be better to get workers to persist between epochs.

# ### Learner compat
# Let's try to plug this into a `Learner` then...
# + from torch.nn import Linear class FakeModel(Module): def __init__(self): self.fc=Linear(5,5) def forward(self,x): print(x) return x['test_field'] # + # IterableDataBlock?? # + # TransformBlock?? # + # exports def get_sources(_,ls): return [o.after_create() for o in ls] class SourceDataBlock(IterableDataBlock): datasets_type=IterableDatasets get_items=get_sources blocks=TransformBlock( type_tfms=[lambda o:iter(o)], item_tfms=lambda o:next(o), tl_type=IterableTfmdLists, dl_type=IterableTfmdDL, dls_kwargs={'indexed':False,'shuffle':False,'persistent_workers':True}) # - # In addition to above, we also don't want to run evaluation epochs since there isn't a simple # way to split envirnoments between those 2 phases. Maybe in the near future we can have this... # export @patch def _do_epoch_validate(self:Learner,*args,**kwargs): return 0 # Another augmentation we need to do is allow metrics to be run during training time... # export @patch def after_create(self:Callback): for cb in self.learn.cbs: if hasattr(cb,'train_metrics'): cb.train_metrics=True self.learn.delay_pred=True # Since a lot of the learners will only have the `xb` field populated, we need look at # the len of xb also # export @patch def after_batch(self:Recorder): "Update all metrics and records lr and smooth loss in training" if len(self.yb) == 0 and len(self.xb) == 0: return mets = self._train_mets if self.training else self._valid_mets for met in mets: met.accumulate(self.learn) if not self.training: return self.lrs.append(self.opt.hypers[-1]['lr']) self.losses.append(self.smooth_loss.value) self.learn.smooth_loss = self.smooth_loss.value source=Source(test_mode=True,cbs=VerboseChecked) block=SourceDataBlock() dls=block.dataloaders([source],n=5,bs=2,num_workers=0,verbose=True) learn=Learner(dls,FakeModel(),loss_func=lambda o: 0.5) learn.fit(4) # This looks good, however because of the `persistent worker's` issue, if we have `num_workers>>0`... 
source=Source(test_mode=True,cbs=VerboseChecked)
dls=block.dataloaders([source],n=5,bs=2,num_workers=2,verbose=True)
learn=Learner(dls,FakeModel(),loss_func=lambda o: 0.5)
learn.fit(4)

# Yeah... so you will notice that the `source` object seems to be reset every epoch... This is because the workers
# are being re-created between epochs.

# ## Conclusion
# Getting fastai API to work with RL environments isn't too bad. I am looking forward to v3.
# The ultimate scary blocker is the fact that persistent workers are not persistent for the dl's
# life cycle, but instead are persistent for a dl's iteration.
#
# For now, the most efficient way to run an agent is with `num_workers==0`. I would be interested
# in fixing this however.

# ## Export

# +
# hide
from fastcore.imports import in_colab
# Since colab still requires tornado<6, we don't want to import nbdev if we don't have to
if not in_colab():
    from nbdev.export import *
    from nbdev.export2html import *
    from nbverbose.cli import *
    make_readme()
    notebook2script()
    notebook2html()
# -
nbs/05a_data.block.ipynb